diff --git a/Gopkg.lock b/Gopkg.lock index 16ec85b9..40fe9c89 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -3,47 +3,60 @@ [[projects]] branch = "master" + digest = "1:1e34eb32b95242aa7c64217164296b40fe026c14c46a8581aca7918118d7b6b8" name = "code.cloudfoundry.org/bytefmt" packages = ["."] - revision = "b31f603f5e1e047fdb38584e1a922dcc5c4de5c8" + pruneopts = "NUT" + revision = "2aa6f33b730c79971cfc3c742f279195b0abc627" [[projects]] branch = "master" + digest = "1:81f8c061c3d18ed1710957910542bc17d2b789c6cd19e0f654c30b35fd255ca5" name = "github.com/Azure/go-ansiterm" packages = [ ".", - "winterm" + "winterm", ] + pruneopts = "NUT" revision = "d6e3b3328b783f23731bc4d058875b0371ff8109" [[projects]] + digest = "1:2be791e7b333ff7c06f8fb3dc18a7d70580e9399dbdffd352621d067ff260b6e" name = "github.com/Microsoft/go-winio" packages = ["."] - revision = "7da180ee92d8bd8bb8c37fc560e673e6557c392f" - version = "v0.4.7" + pruneopts = "NUT" + revision = "97e4973ce50b2ff5f09635a57e2b88a037aae829" + version = "v0.4.11" [[projects]] branch = "master" + digest = "1:7da1a26e347165227c79dfb10b90b3b5dedb6cbae50e88cdb81f5b6259b5b951" name = "github.com/Nvveen/Gotty" packages = ["."] + pruneopts = "NUT" revision = "cd527374f1e5bff4938207604a14f2e38a9cf512" [[projects]] branch = "master" + digest = "1:4a029051269e04c040c092eb4ddd92732f8f3a3921a8b43b82b30804e00f3357" name = "github.com/containerd/continuity" packages = ["pathdriver"] - revision = "d3c23511c1bf5851696cba83143d9cbcd666869b" + pruneopts = "NUT" + revision = "bd77b46c8352f74eb12c85bdc01f4b90f69d66b4" [[projects]] + digest = "1:4189ee6a3844f555124d9d2656fe7af02fca961c2a9bad9074789df13a0c62e0" name = "github.com/docker/distribution" packages = [ - ".", "digestset", - "reference" + "reference", ] - revision = "f0cc927784781fa395c06317c58dea2841ece3a9" + pruneopts = "NUT" + revision = "83389a148052d74ac602f5f1d62f86ff2f3c4aa5" [[projects]] + branch = "master" + digest = "1:678c256d4c5fb4be98f3c3d365d51f268b79d2d4e856e40f2d0cfb334110c145" name = "github.com/docker/docker" packages = [ "api", @@ -63,52 +76,71 @@ "api/types/versions", "api/types/volume", "client", + "errdefs", "opts", - "pkg/archive", "pkg/fileutils", "pkg/homedir", "pkg/idtools", "pkg/ioutils", - "pkg/jsonmessage", "pkg/longpath", "pkg/mount", "pkg/pools", "pkg/stdcopy", "pkg/system", - "pkg/term", - "pkg/term/windows" ] - revision = "a422774e593b33bd287d9890544ad9e09b380d8c" + pruneopts = "NUT" + revision = "299015de409743b3c85a33872dce645ed748d804" [[projects]] + digest = "1:2a47f7eb1a2c30428d1ee6808cb66d4deb17e68a3e55d696f03c8068552ba5e8" name = "github.com/docker/go-connections" packages = [ "nat", "sockets", - "tlsconfig" + "tlsconfig", ] - revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d" - version = "v0.3.0" + pruneopts = "NUT" + revision = "7395e3f8aa162843a74ed6d48e79627d9792ac55" + version = "v0.4.0" [[projects]] + digest = "1:4340101f42556a9cb2f7a360a0e95a019bfef6247d92e6c4c46f2433cf86a482" name = "github.com/docker/go-units" packages = ["."] + pruneopts = "NUT" revision = "47565b4f722fb6ceae66b95f853feed578a4a51c" version = "v0.3.3" [[projects]] - branch = "master" + digest = "1:00d0d550a1f1d7ca03270ebc1e136f21f6b9dab37f0c37e9a90d56d2f7afcff9" + name = "github.com/docker/libnetwork" + packages = ["ipamutils"] + pruneopts = "NUT" + revision = "19279f0492417475b6bfbd0aa529f73e8f178fb5" + +[[projects]] + digest = "1:d0a3e7afbdc348a36faa023b59faf86a60404037f101bb4b8cc4ae63d8b94be1" name = "github.com/fsouza/go-dockerclient" - packages = ["."] - revision = 
"51bd33c0c7792b2566054672a4915b406d988cc6" + packages = [ + ".", + "internal/archive", + "internal/jsonmessage", + "internal/term", + ] + pruneopts = "NUT" + revision = "1b33efcb40bef3494af8bc6a1f73600f37a3c9d1" + version = "v1.3.0" [[projects]] + digest = "1:38e684375ef5b55e812332266d63f9fc5b6329ab303067c4cdda051db6d29ca4" name = "github.com/gogo/protobuf" packages = ["proto"] - revision = "1adfc126b41513cc696b209667c8656ea7aac67c" - version = "v1.0.0" + pruneopts = "NUT" + revision = "636bf0302bc95575d69441b25a2603156ffdddf1" + version = "v1.1.1" [[projects]] + digest = "1:2271ce74ea1368c951255b70585fe2d41822bd887c645f7edf03ccec037eff46" name = "github.com/google/go-containerregistry" packages = [ "pkg/authn", @@ -123,117 +155,176 @@ "pkg/v1/remote/transport", "pkg/v1/tarball", "pkg/v1/types", - "pkg/v1/v1util" + "pkg/v1/v1util", ] + pruneopts = "NUT" revision = "dc0a77702ddab8c3dc5630eca7c026d571c1c9b7" [[projects]] + digest = "1:406338ad39ab2e37b7f4452906442a3dbf0eb3379dd1f06aafb5c07e769a5fbb" name = "github.com/inconshreveable/mousetrap" packages = ["."] + pruneopts = "NUT" revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" version = "v1.0" [[projects]] branch = "master" + digest = "1:c8a452cc8dd4ef9f857570ce2be31ca257a0928bf3c2b08cd7e11972b985c6d7" + name = "github.com/konsorten/go-windows-terminal-sequences" + packages = ["."] + pruneopts = "NUT" + revision = "b729f2633dfe35f4d1d8a32385f6685610ce1cb5" + +[[projects]] + digest = "1:a4df73029d2c42fabcb6b41e327d2f87e685284ec03edf76921c267d9cfc9c23" name = "github.com/mitchellh/go-homedir" packages = ["."] - revision = "3864e76763d94a6df2f9960b16a20a33da9f9a66" + pruneopts = "NUT" + revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4" + version = "v1.0.0" [[projects]] - branch = "master" + digest = "1:26217ee135b8157549e648efe97dff3282e4bd597a912d784db964df41067f29" name = "github.com/nightlyone/lockfile" packages = ["."] - revision = "6a197d5ea61168f2ac821de2b7f011b250904900" + pruneopts = "NUT" + revision = "0ad87eef1443f64d3d8c50da647e2b1552851124" [[projects]] + digest = "1:e0cc8395ea893c898ff5eb0850f4d9851c1f57c78c232304a026379a47a552d0" name = "github.com/opencontainers/go-digest" packages = ["."] + pruneopts = "NUT" revision = "279bed98673dd5bef374d3b6e4b09e2af76183bf" version = "v1.0.0-rc1" [[projects]] + digest = "1:11db38d694c130c800d0aefb502fb02519e514dc53d9804ce51d1ad25ec27db6" name = "github.com/opencontainers/image-spec" packages = [ "specs-go", - "specs-go/v1" + "specs-go/v1", ] + pruneopts = "NUT" revision = "d60099175f88c47cd379c4738d158884749ed235" version = "v1.0.1" [[projects]] + digest = "1:918dbd46ad099418ce9760291bb156ebf23d441aad4cb682dd9bf09e2d3c0c7b" name = "github.com/opencontainers/runc" - packages = [ - "libcontainer/system", - "libcontainer/user" - ] + packages = ["libcontainer/user"] + pruneopts = "NUT" revision = "baf6536d6259209c3edfa2b22237af82942d3dfa" version = "v0.1.1" [[projects]] + digest = "1:5cf3f025cbee5951a4ee961de067c8a89fc95a5adabead774f82822efabab121" name = "github.com/pkg/errors" packages = ["."] + pruneopts = "NUT" revision = "645ef00459ed84a119197bfb8d8205042c6df63d" version = "v0.8.0" [[projects]] + digest = "1:de406937d444963016a8644b65e56e2f241a1d81f76418ce59ed12257fabb5ad" name = "github.com/pkg/profile" packages = ["."] + pruneopts = "NUT" revision = "5b67d428864e92711fcbd2f8629456121a56d91f" version = "v1.2.1" [[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" name = "github.com/pmezard/go-difflib" packages = ["difflib"] + pruneopts = 
"NUT" revision = "792786c7400a136282c1664665ae0a8db921c6c2" version = "v1.0.0" [[projects]] + digest = "1:01252cd79aac70f16cac02a72a1067dd136e0ad6d5b597d0129cf74c739fd8d1" name = "github.com/sirupsen/logrus" packages = ["."] - revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" - version = "v1.0.5" + pruneopts = "NUT" + revision = "a67f783a3814b8729bd2dac5780b5f78f8dbd64d" + version = "v1.1.0" [[projects]] + digest = "1:343d44e06621142ab09ae0c76c1799104cdfddd3ffb445d78b1adf8dc3ffaf3d" name = "github.com/spf13/cobra" packages = ["."] + pruneopts = "NUT" revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385" version = "v0.0.3" [[projects]] + digest = "1:e3707aeaccd2adc89eba6c062fec72116fe1fc1ba71097da85b4d8ae1668a675" name = "github.com/spf13/pflag" packages = ["."] - revision = "583c0c0531f06d5278b7d917446061adc344b5cd" - version = "v1.0.1" + pruneopts = "NUT" + revision = "9a97c102cda95a86cec2345a6f09f55a939babf5" + version = "v1.0.2" [[projects]] branch = "master" + digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8" name = "golang.org/x/crypto" packages = ["ssh/terminal"] - revision = "8ac0e0d97ce45cd83d1d7243c060cb8461dda5e9" + pruneopts = "NUT" + revision = "e3636079e1a4c1f337f212cc5cd2aca108f6c900" [[projects]] branch = "master" + digest = "1:d9f9f45d3383b93ce1c7e744b3ede6974917ae4d100e1a7abfc2906e5e5e2e28" name = "golang.org/x/net" packages = [ "context", "context/ctxhttp", "internal/socks", - "proxy" + "proxy", ] - revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196" + pruneopts = "NUT" + revision = "4dfa2610cdf3b287375bbba5b8f2a14d3b01d8de" [[projects]] branch = "master" + digest = "1:4ea08b5c4c51887a54caee2954db97b620736ae510bdc8514f691d3a9375d93a" name = "golang.org/x/sys" packages = [ "unix", - "windows" + "windows", ] - revision = "bff228c7b664c5fce602223a05fb708fd8654986" + pruneopts = "NUT" + revision = "e4b3c5e9061176387e7cea65e4dc5853801f3fb7" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "3772f28f3f9281dbe6c04a6fb717ab333dbd59045aefcb12bba9cae2eb8aed41" + input-imports = [ + "code.cloudfoundry.org/bytefmt", + "github.com/docker/docker/api/types", + "github.com/docker/docker/client", + "github.com/docker/docker/pkg/system", + "github.com/fsouza/go-dockerclient", + "github.com/google/go-containerregistry/pkg/authn", + "github.com/google/go-containerregistry/pkg/name", + "github.com/google/go-containerregistry/pkg/v1", + "github.com/google/go-containerregistry/pkg/v1/daemon", + "github.com/google/go-containerregistry/pkg/v1/mutate", + "github.com/google/go-containerregistry/pkg/v1/partial", + "github.com/google/go-containerregistry/pkg/v1/random", + "github.com/google/go-containerregistry/pkg/v1/remote", + "github.com/google/go-containerregistry/pkg/v1/tarball", + "github.com/google/go-containerregistry/pkg/v1/types", + "github.com/mitchellh/go-homedir", + "github.com/nightlyone/lockfile", + "github.com/pkg/errors", + "github.com/pkg/profile", + "github.com/pmezard/go-difflib/difflib", + "github.com/sirupsen/logrus", + "github.com/spf13/cobra", + "github.com/spf13/pflag", + ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 99fb9264..7ddde86e 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -1,40 +1,27 @@ -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - -# Implicit dependency of github.com/fsouza/go-dockerclient, which depends on -# github.com/docker/docker, which depends on github.com/docker/distribution. -# Since this is vendored through something other than dep, dep doesn't know -# how to resolve it, so we have to do it manually. -required = ["github.com/docker/distribution"] -[[constraint]] - name = "github.com/docker/distribution" - revision = "f0cc927784781fa395c06317c58dea2841ece3a9" +[prune] + unused-packages = true + non-go = true + go-tests = true [[constraint]] name = "github.com/fsouza/go-dockerclient" - branch = "master" + version = "v1.3.0" [[constraint]] + name = "github.com/docker/docker" branch = "master" + +[[override]] + name = "github.com/docker/distribution" + revision = "83389a148052d74ac602f5f1d62f86ff2f3c4aa5" + +[[override]] + name = "github.com/docker/libnetwork" + revision = "19279f0492417475b6bfbd0aa529f73e8f178fb5" + +[[constraint]] name = "github.com/nightlyone/lockfile" + revision = "0ad87eef1443f64d3d8c50da647e2b1552851124" [[constraint]] name = "github.com/pmezard/go-difflib" diff --git a/vendor/code.cloudfoundry.org/bytefmt/BUILD.bazel b/vendor/code.cloudfoundry.org/bytefmt/BUILD.bazel deleted file mode 100644 index 05d9b0dc..00000000 --- a/vendor/code.cloudfoundry.org/bytefmt/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "bytes.go", - "package.go", - ], - importpath = "code.cloudfoundry.org/bytefmt", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_xtest", - srcs = [ - "bytes_test.go", - "formatters_suite_test.go", - ], - importpath = "code.cloudfoundry.org/bytefmt_test", - deps = [ - ":go_default_library", - "//vendor/github.com/onsi/ginkgo:go_default_library", - "//vendor/github.com/onsi/gomega:go_default_library", - ], -) diff --git a/vendor/code.cloudfoundry.org/bytefmt/README.md b/vendor/code.cloudfoundry.org/bytefmt/README.md deleted file mode 100644 index 44d287d1..00000000 --- a/vendor/code.cloudfoundry.org/bytefmt/README.md +++ /dev/null @@ -1,15 +0,0 @@ -bytefmt -======= - -**Note**: This repository should be imported as `code.cloudfoundry.org/bytefmt`. - -Human-readable byte formatter. 
- -Example: - -```go -bytefmt.ByteSize(100.5*bytefmt.MEGABYTE) // returns "100.5M" -bytefmt.ByteSize(uint64(1024)) // returns "1K" -``` - -For documentation, please see http://godoc.org/code.cloudfoundry.org/bytefmt diff --git a/vendor/code.cloudfoundry.org/bytefmt/bytes.go b/vendor/code.cloudfoundry.org/bytefmt/bytes.go index f33bb95d..e0e09bbe 100644 --- a/vendor/code.cloudfoundry.org/bytefmt/bytes.go +++ b/vendor/code.cloudfoundry.org/bytefmt/bytes.go @@ -7,24 +7,20 @@ package bytefmt import ( "errors" - "fmt" - "regexp" "strconv" "strings" + "unicode" ) const ( - BYTE = 1.0 << (10 * iota) + BYTE = 1 << (10 * iota) KILOBYTE MEGABYTE GIGABYTE TERABYTE ) -var ( - bytesPattern = regexp.MustCompile(`(?i)^(-?\d+(?:\.\d+)?)([KMGT]i?B?|B)$`) - invalidByteQuantityError = errors.New("byte quantity must be a positive integer with a unit of measurement like M, MB, MiB, G, GiB, or GB") -) +var invalidByteQuantityError = errors.New("byte quantity must be a positive integer with a unit of measurement like M, MB, MiB, G, GiB, or GB") // ByteSize returns a human-readable byte string of the form 10M, 12.5K, and so forth. The following units are available: // T: Terabyte @@ -35,7 +31,7 @@ var ( // The unit that results in the smallest number greater than or equal to 1 is always chosen. func ByteSize(bytes uint64) string { unit := "" - value := float32(bytes) + value := float64(bytes) switch { case bytes >= TERABYTE: @@ -56,9 +52,9 @@ func ByteSize(bytes uint64) string { return "0" } - stringValue := fmt.Sprintf("%.1f", value) - stringValue = strings.TrimSuffix(stringValue, ".0") - return fmt.Sprintf("%s%s", stringValue, unit) + result := strconv.FormatFloat(value, 'f', 1, 64) + result = strings.TrimSuffix(result, ".0") + return result + unit } // ToMegabytes parses a string formatted by ByteSize as megabytes. @@ -77,30 +73,33 @@ func ToMegabytes(s string) (uint64, error) { // GB = G = GiB = 1024 * M // TB = T = TiB = 1024 * G func ToBytes(s string) (uint64, error) { - parts := bytesPattern.FindStringSubmatch(strings.TrimSpace(s)) - if len(parts) < 3 { + s = strings.TrimSpace(s) + s = strings.ToUpper(s) + + i := strings.IndexFunc(s, unicode.IsLetter) + + if i == -1 { return 0, invalidByteQuantityError } - value, err := strconv.ParseFloat(parts[1], 64) - if err != nil || value <= 0 { + bytesString, multiple := s[:i], s[i:] + bytes, err := strconv.ParseFloat(bytesString, 64) + if err != nil || bytes <= 0 { return 0, invalidByteQuantityError } - var bytes uint64 - unit := strings.ToUpper(parts[2]) - switch unit[:1] { - case "T": - bytes = uint64(value * TERABYTE) - case "G": - bytes = uint64(value * GIGABYTE) - case "M": - bytes = uint64(value * MEGABYTE) - case "K": - bytes = uint64(value * KILOBYTE) + switch multiple { + case "T", "TB", "TIB": + return uint64(bytes * TERABYTE), nil + case "G", "GB", "GIB": + return uint64(bytes * GIGABYTE), nil + case "M", "MB", "MIB": + return uint64(bytes * MEGABYTE), nil + case "K", "KB", "KIB": + return uint64(bytes * KILOBYTE), nil case "B": - bytes = uint64(value * BYTE) + return uint64(bytes), nil + default: + return 0, invalidByteQuantityError } - - return bytes, nil } diff --git a/vendor/code.cloudfoundry.org/bytefmt/bytes_test.go b/vendor/code.cloudfoundry.org/bytefmt/bytes_test.go deleted file mode 100644 index 1f2185f0..00000000 --- a/vendor/code.cloudfoundry.org/bytefmt/bytes_test.go +++ /dev/null @@ -1,271 +0,0 @@ -package bytefmt_test - -import ( - . "code.cloudfoundry.org/bytefmt" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("bytefmt", func() { - - Context("ByteSize", func() { - It("Prints in the largest possible unit", func() { - Expect(ByteSize(10 * TERABYTE)).To(Equal("10T")) - Expect(ByteSize(uint64(10.5 * TERABYTE))).To(Equal("10.5T")) - - Expect(ByteSize(10 * GIGABYTE)).To(Equal("10G")) - Expect(ByteSize(uint64(10.5 * GIGABYTE))).To(Equal("10.5G")) - - Expect(ByteSize(100 * MEGABYTE)).To(Equal("100M")) - Expect(ByteSize(uint64(100.5 * MEGABYTE))).To(Equal("100.5M")) - - Expect(ByteSize(100 * KILOBYTE)).To(Equal("100K")) - Expect(ByteSize(uint64(100.5 * KILOBYTE))).To(Equal("100.5K")) - - Expect(ByteSize(1)).To(Equal("1B")) - }) - - It("prints '0' for zero bytes", func() { - Expect(ByteSize(0)).To(Equal("0")) - }) - }) - - Context("ToMegabytes", func() { - It("parses byte amounts with short units (e.g. M, G)", func() { - var ( - megabytes uint64 - err error - ) - - megabytes, err = ToMegabytes("5B") - Expect(megabytes).To(Equal(uint64(0))) - Expect(err).NotTo(HaveOccurred()) - - megabytes, err = ToMegabytes("5K") - Expect(megabytes).To(Equal(uint64(0))) - Expect(err).NotTo(HaveOccurred()) - - megabytes, err = ToMegabytes("5M") - Expect(megabytes).To(Equal(uint64(5))) - Expect(err).NotTo(HaveOccurred()) - - megabytes, err = ToMegabytes("5m") - Expect(megabytes).To(Equal(uint64(5))) - Expect(err).NotTo(HaveOccurred()) - - megabytes, err = ToMegabytes("2G") - Expect(megabytes).To(Equal(uint64(2 * 1024))) - Expect(err).NotTo(HaveOccurred()) - - megabytes, err = ToMegabytes("3T") - Expect(megabytes).To(Equal(uint64(3 * 1024 * 1024))) - Expect(err).NotTo(HaveOccurred()) - }) - - It("parses byte amounts with long units (e.g MB, GB)", func() { - var ( - megabytes uint64 - err error - ) - - megabytes, err = ToMegabytes("5MB") - Expect(megabytes).To(Equal(uint64(5))) - Expect(err).NotTo(HaveOccurred()) - - megabytes, err = ToMegabytes("5mb") - Expect(megabytes).To(Equal(uint64(5))) - Expect(err).NotTo(HaveOccurred()) - - megabytes, err = ToMegabytes("2GB") - Expect(megabytes).To(Equal(uint64(2 * 1024))) - Expect(err).NotTo(HaveOccurred()) - - megabytes, err = ToMegabytes("3TB") - Expect(megabytes).To(Equal(uint64(3 * 1024 * 1024))) - Expect(err).NotTo(HaveOccurred()) - }) - - It("parses byte amounts with long binary units (e.g MiB, GiB)", func() { - var ( - megabytes uint64 - err error - ) - - megabytes, err = ToMegabytes("5MiB") - Expect(megabytes).To(Equal(uint64(5))) - Expect(err).NotTo(HaveOccurred()) - - megabytes, err = ToMegabytes("5mib") - Expect(megabytes).To(Equal(uint64(5))) - Expect(err).NotTo(HaveOccurred()) - - megabytes, err = ToMegabytes("2GiB") - Expect(megabytes).To(Equal(uint64(2 * 1024))) - Expect(err).NotTo(HaveOccurred()) - - megabytes, err = ToMegabytes("3TiB") - Expect(megabytes).To(Equal(uint64(3 * 1024 * 1024))) - Expect(err).NotTo(HaveOccurred()) - }) - - It("returns an error when the unit is missing", func() { - _, err := ToMegabytes("5") - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("unit of measurement")) - }) - - It("returns an error when the unit is unrecognized", func() { - _, err := ToMegabytes("5MBB") - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("unit of measurement")) - - _, err = ToMegabytes("5BB") - Expect(err).To(HaveOccurred()) - }) - - It("allows whitespace before and after the value", func() { - megabytes, err := ToMegabytes("\t\n\r 5MB ") - Expect(megabytes).To(Equal(uint64(5))) - Expect(err).NotTo(HaveOccurred()) - }) - - It("returns an error for negative values", 
func() { - _, err := ToMegabytes("-5MB") - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("unit of measurement")) - }) - - It("returns an error for zero values", func() { - _, err := ToMegabytes("0TB") - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("unit of measurement")) - }) - }) - - Context("ToBytes", func() { - It("parses byte amounts with short units (e.g. M, G)", func() { - var ( - bytes uint64 - err error - ) - - bytes, err = ToBytes("5B") - Expect(bytes).To(Equal(uint64(5))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("5K") - Expect(bytes).To(Equal(uint64(5 * KILOBYTE))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("5M") - Expect(bytes).To(Equal(uint64(5 * MEGABYTE))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("5m") - Expect(bytes).To(Equal(uint64(5 * MEGABYTE))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("2G") - Expect(bytes).To(Equal(uint64(2 * GIGABYTE))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("3T") - Expect(bytes).To(Equal(uint64(3 * TERABYTE))) - Expect(err).NotTo(HaveOccurred()) - }) - - It("parses byte amounts that are float (e.g. 5.3KB)", func() { - var ( - bytes uint64 - err error - ) - - bytes, err = ToBytes("13.5KB") - Expect(bytes).To(Equal(uint64(13824))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("4.5KB") - Expect(bytes).To(Equal(uint64(4608))) - Expect(err).NotTo(HaveOccurred()) - }) - - It("parses byte amounts with long units (e.g MB, GB)", func() { - var ( - bytes uint64 - err error - ) - - bytes, err = ToBytes("5MB") - Expect(bytes).To(Equal(uint64(5 * MEGABYTE))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("5mb") - Expect(bytes).To(Equal(uint64(5 * MEGABYTE))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("2GB") - Expect(bytes).To(Equal(uint64(2 * GIGABYTE))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("3TB") - Expect(bytes).To(Equal(uint64(3 * TERABYTE))) - Expect(err).NotTo(HaveOccurred()) - }) - - It("parses byte amounts with long binary units (e.g MiB, GiB)", func() { - var ( - bytes uint64 - err error - ) - - bytes, err = ToBytes("5MiB") - Expect(bytes).To(Equal(uint64(5 * MEGABYTE))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("5mib") - Expect(bytes).To(Equal(uint64(5 * MEGABYTE))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("2GiB") - Expect(bytes).To(Equal(uint64(2 * GIGABYTE))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("3TiB") - Expect(bytes).To(Equal(uint64(3 * TERABYTE))) - Expect(err).NotTo(HaveOccurred()) - }) - - It("returns an error when the unit is missing", func() { - _, err := ToBytes("5") - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("unit of measurement")) - }) - - It("returns an error when the unit is unrecognized", func() { - _, err := ToBytes("5MBB") - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("unit of measurement")) - - _, err = ToBytes("5BB") - Expect(err).To(HaveOccurred()) - }) - - It("allows whitespace before and after the value", func() { - bytes, err := ToBytes("\t\n\r 5MB ") - Expect(bytes).To(Equal(uint64(5 * MEGABYTE))) - Expect(err).NotTo(HaveOccurred()) - }) - - It("returns an error for negative values", func() { - _, err := ToBytes("-5MB") - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("unit of measurement")) - }) - - It("returns an error for zero values", func() { - _, err := 
ToBytes("0TB") - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("unit of measurement")) - }) - }) -}) diff --git a/vendor/code.cloudfoundry.org/bytefmt/formatters_suite_test.go b/vendor/code.cloudfoundry.org/bytefmt/formatters_suite_test.go deleted file mode 100644 index 64af7ca4..00000000 --- a/vendor/code.cloudfoundry.org/bytefmt/formatters_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package bytefmt_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "testing" -) - -func TestFormatters(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Bytefmt Suite") -} diff --git a/vendor/github.com/Azure/go-ansiterm/BUILD.bazel b/vendor/github.com/Azure/go-ansiterm/BUILD.bazel deleted file mode 100644 index da4036b4..00000000 --- a/vendor/github.com/Azure/go-ansiterm/BUILD.bazel +++ /dev/null @@ -1,35 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "constants.go", - "context.go", - "csi_entry_state.go", - "csi_param_state.go", - "escape_intermediate_state.go", - "escape_state.go", - "event_handler.go", - "ground_state.go", - "osc_string_state.go", - "parser.go", - "parser_action_helpers.go", - "parser_actions.go", - "states.go", - "utilities.go", - ], - importpath = "github.com/Azure/go-ansiterm", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = [ - "parser_test.go", - "parser_test_helpers_test.go", - "parser_test_utilities_test.go", - "test_event_handler_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/Azure/go-ansiterm", -) diff --git a/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/Azure/go-ansiterm/README.md deleted file mode 100644 index 261c041e..00000000 --- a/vendor/github.com/Azure/go-ansiterm/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# go-ansiterm - -This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent. - -For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position. - -The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go). - -See parser_test.go for examples exercising the state machine and generating appropriate function calls. - ------ -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
diff --git a/vendor/github.com/Azure/go-ansiterm/parser_test.go b/vendor/github.com/Azure/go-ansiterm/parser_test.go deleted file mode 100644 index cd4888ff..00000000 --- a/vendor/github.com/Azure/go-ansiterm/parser_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package ansiterm - -import ( - "fmt" - "testing" -) - -func TestStateTransitions(t *testing.T) { - stateTransitionHelper(t, "CsiEntry", "Ground", alphabetics) - stateTransitionHelper(t, "CsiEntry", "CsiParam", csiCollectables) - stateTransitionHelper(t, "Escape", "CsiEntry", []byte{ANSI_ESCAPE_SECONDARY}) - stateTransitionHelper(t, "Escape", "OscString", []byte{0x5D}) - stateTransitionHelper(t, "Escape", "Ground", escapeToGroundBytes) - stateTransitionHelper(t, "Escape", "EscapeIntermediate", intermeds) - stateTransitionHelper(t, "EscapeIntermediate", "EscapeIntermediate", intermeds) - stateTransitionHelper(t, "EscapeIntermediate", "EscapeIntermediate", executors) - stateTransitionHelper(t, "EscapeIntermediate", "Ground", escapeIntermediateToGroundBytes) - stateTransitionHelper(t, "OscString", "Ground", []byte{ANSI_BEL}) - stateTransitionHelper(t, "OscString", "Ground", []byte{0x5C}) - stateTransitionHelper(t, "Ground", "Ground", executors) -} - -func TestAnyToX(t *testing.T) { - anyToXHelper(t, []byte{ANSI_ESCAPE_PRIMARY}, "Escape") - anyToXHelper(t, []byte{DCS_ENTRY}, "DcsEntry") - anyToXHelper(t, []byte{OSC_STRING}, "OscString") - anyToXHelper(t, []byte{CSI_ENTRY}, "CsiEntry") - anyToXHelper(t, toGroundBytes, "Ground") -} - -func TestCollectCsiParams(t *testing.T) { - parser, _ := createTestParser("CsiEntry") - parser.Parse(csiCollectables) - - buffer := parser.context.paramBuffer - bufferCount := len(buffer) - - if bufferCount != len(csiCollectables) { - t.Errorf("Buffer: %v", buffer) - t.Errorf("CsiParams: %v", csiCollectables) - t.Errorf("Buffer count failure: %d != %d", bufferCount, len(csiParams)) - return - } - - for i, v := range csiCollectables { - if v != buffer[i] { - t.Errorf("Buffer: %v", buffer) - t.Errorf("CsiParams: %v", csiParams) - t.Errorf("Mismatch at buffer[%d] = %d", i, buffer[i]) - } - } -} - -func TestParseParams(t *testing.T) { - parseParamsHelper(t, []byte{}, []string{}) - parseParamsHelper(t, []byte{';'}, []string{}) - parseParamsHelper(t, []byte{';', ';'}, []string{}) - parseParamsHelper(t, []byte{'7'}, []string{"7"}) - parseParamsHelper(t, []byte{'7', ';'}, []string{"7"}) - parseParamsHelper(t, []byte{'7', ';', ';'}, []string{"7"}) - parseParamsHelper(t, []byte{'7', ';', ';', '8'}, []string{"7", "8"}) - parseParamsHelper(t, []byte{'7', ';', '8', ';'}, []string{"7", "8"}) - parseParamsHelper(t, []byte{'7', ';', ';', '8', ';', ';'}, []string{"7", "8"}) - parseParamsHelper(t, []byte{'7', '8'}, []string{"78"}) - parseParamsHelper(t, []byte{'7', '8', ';'}, []string{"78"}) - parseParamsHelper(t, []byte{'7', '8', ';', '9', '0'}, []string{"78", "90"}) - parseParamsHelper(t, []byte{'7', '8', ';', ';', '9', '0'}, []string{"78", "90"}) - parseParamsHelper(t, []byte{'7', '8', ';', '9', '0', ';'}, []string{"78", "90"}) - parseParamsHelper(t, []byte{'7', '8', ';', '9', '0', ';', ';'}, []string{"78", "90"}) -} - -func TestCursor(t *testing.T) { - cursorSingleParamHelper(t, 'A', "CUU") - cursorSingleParamHelper(t, 'B', "CUD") - cursorSingleParamHelper(t, 'C', "CUF") - cursorSingleParamHelper(t, 'D', "CUB") - cursorSingleParamHelper(t, 'E', "CNL") - cursorSingleParamHelper(t, 'F', "CPL") - cursorSingleParamHelper(t, 'G', "CHA") - cursorTwoParamHelper(t, 'H', "CUP") - cursorTwoParamHelper(t, 'f', "HVP") - 
funcCallParamHelper(t, []byte{'?', '2', '5', 'h'}, "CsiEntry", "Ground", []string{"DECTCEM([true])"}) - funcCallParamHelper(t, []byte{'?', '2', '5', 'l'}, "CsiEntry", "Ground", []string{"DECTCEM([false])"}) -} - -func TestErase(t *testing.T) { - // Erase in Display - eraseHelper(t, 'J', "ED") - - // Erase in Line - eraseHelper(t, 'K', "EL") -} - -func TestSelectGraphicRendition(t *testing.T) { - funcCallParamHelper(t, []byte{'m'}, "CsiEntry", "Ground", []string{"SGR([0])"}) - funcCallParamHelper(t, []byte{'0', 'm'}, "CsiEntry", "Ground", []string{"SGR([0])"}) - funcCallParamHelper(t, []byte{'0', ';', '1', 'm'}, "CsiEntry", "Ground", []string{"SGR([0 1])"}) - funcCallParamHelper(t, []byte{'0', ';', '1', ';', '2', 'm'}, "CsiEntry", "Ground", []string{"SGR([0 1 2])"}) -} - -func TestScroll(t *testing.T) { - scrollHelper(t, 'S', "SU") - scrollHelper(t, 'T', "SD") -} - -func TestPrint(t *testing.T) { - parser, evtHandler := createTestParser("Ground") - parser.Parse(printables) - validateState(t, parser.currState, "Ground") - - for i, v := range printables { - expectedCall := fmt.Sprintf("Print([%s])", string(v)) - actualCall := evtHandler.FunctionCalls[i] - if actualCall != expectedCall { - t.Errorf("Actual != Expected: %v != %v at %d", actualCall, expectedCall, i) - } - } -} - -func TestClear(t *testing.T) { - p, _ := createTestParser("Ground") - fillContext(p.context) - p.clear() - validateEmptyContext(t, p.context) -} - -func TestClearOnStateChange(t *testing.T) { - clearOnStateChangeHelper(t, "Ground", "Escape", []byte{ANSI_ESCAPE_PRIMARY}) - clearOnStateChangeHelper(t, "Ground", "CsiEntry", []byte{CSI_ENTRY}) -} - -func TestC0(t *testing.T) { - expectedCall := "Execute([" + string(ANSI_LINE_FEED) + "])" - c0Helper(t, []byte{ANSI_LINE_FEED}, "Ground", []string{expectedCall}) - expectedCall = "Execute([" + string(ANSI_CARRIAGE_RETURN) + "])" - c0Helper(t, []byte{ANSI_CARRIAGE_RETURN}, "Ground", []string{expectedCall}) -} - -func TestEscDispatch(t *testing.T) { - funcCallParamHelper(t, []byte{'M'}, "Escape", "Ground", []string{"RI([])"}) -} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_test_helpers_test.go b/vendor/github.com/Azure/go-ansiterm/parser_test_helpers_test.go deleted file mode 100644 index 562f215d..00000000 --- a/vendor/github.com/Azure/go-ansiterm/parser_test_helpers_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package ansiterm - -import ( - "fmt" - "testing" -) - -func getStateNames() []string { - parser, _ := createTestParser("Ground") - - stateNames := []string{} - for _, state := range parser.stateMap { - stateNames = append(stateNames, state.Name()) - } - - return stateNames -} - -func stateTransitionHelper(t *testing.T, start string, end string, bytes []byte) { - for _, b := range bytes { - bytes := []byte{byte(b)} - parser, _ := createTestParser(start) - parser.Parse(bytes) - validateState(t, parser.currState, end) - } -} - -func anyToXHelper(t *testing.T, bytes []byte, expectedState string) { - for _, s := range getStateNames() { - stateTransitionHelper(t, s, expectedState, bytes) - } -} - -func funcCallParamHelper(t *testing.T, bytes []byte, start string, expected string, expectedCalls []string) { - parser, evtHandler := createTestParser(start) - parser.Parse(bytes) - validateState(t, parser.currState, expected) - validateFuncCalls(t, evtHandler.FunctionCalls, expectedCalls) -} - -func parseParamsHelper(t *testing.T, bytes []byte, expectedParams []string) { - params, err := parseParams(bytes) - - if err != nil { - t.Errorf("Parameter parse error: %v", err) - 
return - } - - if len(params) != len(expectedParams) { - t.Errorf("Parsed parameters: %v", params) - t.Errorf("Expected parameters: %v", expectedParams) - t.Errorf("Parameter length failure: %d != %d", len(params), len(expectedParams)) - return - } - - for i, v := range expectedParams { - if v != params[i] { - t.Errorf("Parsed parameters: %v", params) - t.Errorf("Expected parameters: %v", expectedParams) - t.Errorf("Parameter parse failure: %s != %s at position %d", v, params[i], i) - } - } -} - -func cursorSingleParamHelper(t *testing.T, command byte, funcName string) { - funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) - funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) - funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)}) - funcCallParamHelper(t, []byte{'2', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([23])", funcName)}) - funcCallParamHelper(t, []byte{'2', ';', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)}) - funcCallParamHelper(t, []byte{'2', ';', '3', ';', '4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)}) -} - -func cursorTwoParamHelper(t *testing.T, command byte, funcName string) { - funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1 1])", funcName)}) - funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1 1])", funcName)}) - funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 1])", funcName)}) - funcCallParamHelper(t, []byte{'2', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([23 1])", funcName)}) - funcCallParamHelper(t, []byte{'2', ';', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 3])", funcName)}) - funcCallParamHelper(t, []byte{'2', ';', '3', ';', '4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 3])", funcName)}) -} - -func eraseHelper(t *testing.T, command byte, funcName string) { - funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)}) - funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)}) - funcCallParamHelper(t, []byte{'1', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) - funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)}) - funcCallParamHelper(t, []byte{'3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([3])", funcName)}) - funcCallParamHelper(t, []byte{'4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)}) - funcCallParamHelper(t, []byte{'1', ';', '2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) -} - -func scrollHelper(t *testing.T, command byte, funcName string) { - funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) - funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) - funcCallParamHelper(t, []byte{'1', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) - funcCallParamHelper(t, []byte{'5', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([5])", funcName)}) - funcCallParamHelper(t, []byte{'4', ';', '6', command}, "CsiEntry", "Ground", 
[]string{fmt.Sprintf("%s([4])", funcName)}) -} - -func clearOnStateChangeHelper(t *testing.T, start string, end string, bytes []byte) { - p, _ := createTestParser(start) - fillContext(p.context) - p.Parse(bytes) - validateState(t, p.currState, end) - validateEmptyContext(t, p.context) -} - -func c0Helper(t *testing.T, bytes []byte, expectedState string, expectedCalls []string) { - parser, evtHandler := createTestParser("Ground") - parser.Parse(bytes) - validateState(t, parser.currState, expectedState) - validateFuncCalls(t, evtHandler.FunctionCalls, expectedCalls) -} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_test_utilities_test.go b/vendor/github.com/Azure/go-ansiterm/parser_test_utilities_test.go deleted file mode 100644 index 78b885ca..00000000 --- a/vendor/github.com/Azure/go-ansiterm/parser_test_utilities_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package ansiterm - -import ( - "testing" -) - -func createTestParser(s string) (*AnsiParser, *TestAnsiEventHandler) { - evtHandler := CreateTestAnsiEventHandler() - parser := CreateParser(s, evtHandler) - - return parser, evtHandler -} - -func validateState(t *testing.T, actualState state, expectedStateName string) { - actualName := "Nil" - - if actualState != nil { - actualName = actualState.Name() - } - - if actualName != expectedStateName { - t.Errorf("Invalid state: '%s' != '%s'", actualName, expectedStateName) - } -} - -func validateFuncCalls(t *testing.T, actualCalls []string, expectedCalls []string) { - actualCount := len(actualCalls) - expectedCount := len(expectedCalls) - - if actualCount != expectedCount { - t.Errorf("Actual calls: %v", actualCalls) - t.Errorf("Expected calls: %v", expectedCalls) - t.Errorf("Call count error: %d != %d", actualCount, expectedCount) - return - } - - for i, v := range actualCalls { - if v != expectedCalls[i] { - t.Errorf("Actual calls: %v", actualCalls) - t.Errorf("Expected calls: %v", expectedCalls) - t.Errorf("Mismatched calls: %s != %s with lengths %d and %d", v, expectedCalls[i], len(v), len(expectedCalls[i])) - } - } -} - -func fillContext(context *ansiContext) { - context.currentChar = 'A' - context.paramBuffer = []byte{'C', 'D', 'E'} - context.interBuffer = []byte{'F', 'G', 'H'} -} - -func validateEmptyContext(t *testing.T, context *ansiContext) { - var expectedCurrChar byte = 0x0 - if context.currentChar != expectedCurrChar { - t.Errorf("Currentchar mismatch '%#x' != '%#x'", context.currentChar, expectedCurrChar) - } - - if len(context.paramBuffer) != 0 { - t.Errorf("Non-empty parameter buffer: %v", context.paramBuffer) - } - - if len(context.paramBuffer) != 0 { - t.Errorf("Non-empty intermediate buffer: %v", context.interBuffer) - } - -} diff --git a/vendor/github.com/Azure/go-ansiterm/test_event_handler_test.go b/vendor/github.com/Azure/go-ansiterm/test_event_handler_test.go deleted file mode 100644 index 60f9f30b..00000000 --- a/vendor/github.com/Azure/go-ansiterm/test_event_handler_test.go +++ /dev/null @@ -1,173 +0,0 @@ -package ansiterm - -import ( - "fmt" - "strconv" -) - -type TestAnsiEventHandler struct { - FunctionCalls []string -} - -func CreateTestAnsiEventHandler() *TestAnsiEventHandler { - evtHandler := TestAnsiEventHandler{} - evtHandler.FunctionCalls = make([]string, 0) - return &evtHandler -} - -func (h *TestAnsiEventHandler) recordCall(call string, params []string) { - s := fmt.Sprintf("%s(%v)", call, params) - h.FunctionCalls = append(h.FunctionCalls, s) -} - -func (h *TestAnsiEventHandler) Print(b byte) error { - h.recordCall("Print", []string{string(b)}) - return 
nil -} - -func (h *TestAnsiEventHandler) Execute(b byte) error { - h.recordCall("Execute", []string{string(b)}) - return nil -} - -func (h *TestAnsiEventHandler) CUU(param int) error { - h.recordCall("CUU", []string{strconv.Itoa(param)}) - return nil -} - -func (h *TestAnsiEventHandler) CUD(param int) error { - h.recordCall("CUD", []string{strconv.Itoa(param)}) - return nil -} - -func (h *TestAnsiEventHandler) CUF(param int) error { - h.recordCall("CUF", []string{strconv.Itoa(param)}) - return nil -} - -func (h *TestAnsiEventHandler) CUB(param int) error { - h.recordCall("CUB", []string{strconv.Itoa(param)}) - return nil -} - -func (h *TestAnsiEventHandler) CNL(param int) error { - h.recordCall("CNL", []string{strconv.Itoa(param)}) - return nil -} - -func (h *TestAnsiEventHandler) CPL(param int) error { - h.recordCall("CPL", []string{strconv.Itoa(param)}) - return nil -} - -func (h *TestAnsiEventHandler) CHA(param int) error { - h.recordCall("CHA", []string{strconv.Itoa(param)}) - return nil -} - -func (h *TestAnsiEventHandler) VPA(param int) error { - h.recordCall("VPA", []string{strconv.Itoa(param)}) - return nil -} - -func (h *TestAnsiEventHandler) CUP(x int, y int) error { - xS, yS := strconv.Itoa(x), strconv.Itoa(y) - h.recordCall("CUP", []string{xS, yS}) - return nil -} - -func (h *TestAnsiEventHandler) HVP(x int, y int) error { - xS, yS := strconv.Itoa(x), strconv.Itoa(y) - h.recordCall("HVP", []string{xS, yS}) - return nil -} - -func (h *TestAnsiEventHandler) DECTCEM(visible bool) error { - h.recordCall("DECTCEM", []string{strconv.FormatBool(visible)}) - return nil -} - -func (h *TestAnsiEventHandler) DECOM(visible bool) error { - h.recordCall("DECOM", []string{strconv.FormatBool(visible)}) - return nil -} - -func (h *TestAnsiEventHandler) DECCOLM(use132 bool) error { - h.recordCall("DECOLM", []string{strconv.FormatBool(use132)}) - return nil -} - -func (h *TestAnsiEventHandler) ED(param int) error { - h.recordCall("ED", []string{strconv.Itoa(param)}) - return nil -} - -func (h *TestAnsiEventHandler) EL(param int) error { - h.recordCall("EL", []string{strconv.Itoa(param)}) - return nil -} - -func (h *TestAnsiEventHandler) IL(param int) error { - h.recordCall("IL", []string{strconv.Itoa(param)}) - return nil -} - -func (h *TestAnsiEventHandler) DL(param int) error { - h.recordCall("DL", []string{strconv.Itoa(param)}) - return nil -} - -func (h *TestAnsiEventHandler) ICH(param int) error { - h.recordCall("ICH", []string{strconv.Itoa(param)}) - return nil -} - -func (h *TestAnsiEventHandler) DCH(param int) error { - h.recordCall("DCH", []string{strconv.Itoa(param)}) - return nil -} - -func (h *TestAnsiEventHandler) SGR(params []int) error { - strings := []string{} - for _, v := range params { - strings = append(strings, strconv.Itoa(v)) - } - - h.recordCall("SGR", strings) - return nil -} - -func (h *TestAnsiEventHandler) SU(param int) error { - h.recordCall("SU", []string{strconv.Itoa(param)}) - return nil -} - -func (h *TestAnsiEventHandler) SD(param int) error { - h.recordCall("SD", []string{strconv.Itoa(param)}) - return nil -} - -func (h *TestAnsiEventHandler) DA(params []string) error { - h.recordCall("DA", params) - return nil -} - -func (h *TestAnsiEventHandler) DECSTBM(top int, bottom int) error { - topS, bottomS := strconv.Itoa(top), strconv.Itoa(bottom) - h.recordCall("DECSTBM", []string{topS, bottomS}) - return nil -} - -func (h *TestAnsiEventHandler) RI() error { - h.recordCall("RI", nil) - return nil -} - -func (h *TestAnsiEventHandler) IND() error { - 
h.recordCall("IND", nil) - return nil -} - -func (h *TestAnsiEventHandler) Flush() error { - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/BUILD.bazel b/vendor/github.com/Azure/go-ansiterm/winterm/BUILD.bazel deleted file mode 100644 index 89e48a42..00000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = select({ - "@io_bazel_rules_go//go/platform:windows": [ - "ansi.go", - "api.go", - "attr_translation.go", - "cursor_helpers.go", - "erase_helpers.go", - "scroll_helper.go", - "utilities.go", - "win_event_handler.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/Azure/go-ansiterm/winterm", - visibility = ["//visibility:public"], - deps = select({ - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/Azure/go-ansiterm:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore deleted file mode 100644 index b883f1fd..00000000 --- a/vendor/github.com/Microsoft/go-winio/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.exe diff --git a/vendor/github.com/Microsoft/go-winio/BUILD.bazel b/vendor/github.com/Microsoft/go-winio/BUILD.bazel deleted file mode 100644 index 8d5184a9..00000000 --- a/vendor/github.com/Microsoft/go-winio/BUILD.bazel +++ /dev/null @@ -1,42 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "ea.go", - "reparse.go", - "syscall.go", - ] + select({ - "@io_bazel_rules_go//go/platform:windows": [ - "backup.go", - "file.go", - "fileinfo.go", - "pipe.go", - "privilege.go", - "sd.go", - "zsyscall_windows.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/Microsoft/go-winio", - visibility = ["//visibility:public"], - deps = select({ - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/golang.org/x/sys/windows:go_default_library", - ], - "//conditions:default": [], - }), -) - -go_test( - name = "go_default_test", - srcs = [ - "backup_test.go", - "ea_test.go", - "pipe_test.go", - "privileges_test.go", - "sd_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/Microsoft/go-winio", -) diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md deleted file mode 100644 index 56800105..00000000 --- a/vendor/github.com/Microsoft/go-winio/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# go-winio - -This repository contains utilities for efficiently performing Win32 IO operations in -Go. Currently, this is focused on accessing named pipes and other file handles, and -for using named pipes as a net transport. - -This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go -to reuse the thread to schedule another goroutine. This limits support to Windows Vista and -newer operating systems. This is similar to the implementation of network sockets in Go's net -package. - -Please see the LICENSE file for licensing information. - -This project has adopted the [Microsoft Open Source Code of -Conduct](https://opensource.microsoft.com/codeofconduct/). For more information -see the [Code of Conduct -FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact -[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional -questions or comments. 
- -Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe -for another named pipe implementation. diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE b/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE new file mode 100644 index 00000000..74487567 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/Microsoft/go-winio/backup_test.go b/vendor/github.com/Microsoft/go-winio/backup_test.go deleted file mode 100644 index cc5a0c5f..00000000 --- a/vendor/github.com/Microsoft/go-winio/backup_test.go +++ /dev/null @@ -1,255 +0,0 @@ -package winio - -import ( - "io" - "io/ioutil" - "os" - "syscall" - "testing" -) - -var testFileName string - -func TestMain(m *testing.M) { - f, err := ioutil.TempFile("", "tmp") - if err != nil { - panic(err) - } - testFileName = f.Name() - f.Close() - defer os.Remove(testFileName) - os.Exit(m.Run()) -} - -func makeTestFile(makeADS bool) error { - os.Remove(testFileName) - f, err := os.Create(testFileName) - if err != nil { - return err - } - defer f.Close() - _, err = f.Write([]byte("testing 1 2 3\n")) - if err != nil { - return err - } - if makeADS { - a, err := os.Create(testFileName + ":ads.txt") - if err != nil { - return err - } - defer a.Close() - _, err = a.Write([]byte("alternate data stream\n")) - if err != nil { - return err - } - } - return nil -} - -func TestBackupRead(t *testing.T) { - err := makeTestFile(true) - if err != nil { - t.Fatal(err) - } - - f, err := os.Open(testFileName) - if err != nil { - t.Fatal(err) - } - defer f.Close() - r := NewBackupFileReader(f, false) - defer r.Close() - b, err := ioutil.ReadAll(r) - if err != nil { - t.Fatal(err) - } - if len(b) == 0 { - t.Fatal("no data") - } -} - -func TestBackupStreamRead(t *testing.T) { - err := makeTestFile(true) - if err != nil { - t.Fatal(err) - } - - f, err := os.Open(testFileName) - if err != nil { - t.Fatal(err) - } - defer f.Close() - r := NewBackupFileReader(f, false) - defer r.Close() - - br := NewBackupStreamReader(r) - gotData := false - gotAltData := false - for { - hdr, err := br.Next() - if err == io.EOF { - break - } - if err != nil { - t.Fatal(err) - } - - switch hdr.Id { - case BackupData: - if gotData { - t.Fatal("duplicate data") - } - if hdr.Name != "" { - t.Fatalf("unexpected name %s", hdr.Name) - } - b, err := ioutil.ReadAll(br) - if err != nil { - t.Fatal(err) - } - if string(b) != "testing 1 2 3\n" { - t.Fatalf("incorrect data %v", b) - } - gotData = true - case BackupAlternateData: - if gotAltData { - t.Fatal("duplicate alt data") - } - if hdr.Name != ":ads.txt:$DATA" { - t.Fatalf("incorrect name %s", hdr.Name) - } - b, err := ioutil.ReadAll(br) - if err != nil { - t.Fatal(err) - } - if string(b) != "alternate data stream\n" { - t.Fatalf("incorrect data %v", b) - } - gotAltData = true - default: - t.Fatalf("unknown stream ID %d", hdr.Id) - } - } - if !gotData || !gotAltData { - t.Fatal("missing stream") - } -} - -func TestBackupStreamWrite(t *testing.T) { - f, err := os.Create(testFileName) - if err != nil { - t.Fatal(err) - } - defer f.Close() - w := NewBackupFileWriter(f, false) - defer w.Close() - - data := "testing 1 2 3\n" - altData := "alternate stream\n" - - br := NewBackupStreamWriter(w) - err = br.WriteHeader(&BackupHeader{Id: BackupData, Size: int64(len(data))}) - if err != nil { - t.Fatal(err) - } - n, err := br.Write([]byte(data)) - if err != nil { - t.Fatal(err) - } - if n != len(data) { - t.Fatal("short write") - } - - err = br.WriteHeader(&BackupHeader{Id: BackupAlternateData, Size: int64(len(altData)), Name: ":ads.txt:$DATA"}) - if err != nil { - t.Fatal(err) - } - n, err = br.Write([]byte(altData)) - if err != nil { - t.Fatal(err) - } - if n != len(altData) { - t.Fatal("short write") - } - - f.Close() - - b, err := ioutil.ReadFile(testFileName) - if err != nil { - t.Fatal(err) - } - if string(b) != data { - 
t.Fatalf("wrong data %v", b) - } - - b, err = ioutil.ReadFile(testFileName + ":ads.txt") - if err != nil { - t.Fatal(err) - } - if string(b) != altData { - t.Fatalf("wrong data %v", b) - } -} - -func makeSparseFile() error { - os.Remove(testFileName) - f, err := os.Create(testFileName) - if err != nil { - return err - } - defer f.Close() - - const ( - FSCTL_SET_SPARSE = 0x000900c4 - FSCTL_SET_ZERO_DATA = 0x000980c8 - ) - - err = syscall.DeviceIoControl(syscall.Handle(f.Fd()), FSCTL_SET_SPARSE, nil, 0, nil, 0, nil, nil) - if err != nil { - return err - } - - _, err = f.Write([]byte("testing 1 2 3\n")) - if err != nil { - return err - } - - _, err = f.Seek(1000000, 0) - if err != nil { - return err - } - - _, err = f.Write([]byte("more data later\n")) - if err != nil { - return err - } - - return nil -} - -func TestBackupSparseFile(t *testing.T) { - err := makeSparseFile() - if err != nil { - t.Fatal(err) - } - - f, err := os.Open(testFileName) - if err != nil { - t.Fatal(err) - } - defer f.Close() - r := NewBackupFileReader(f, false) - defer r.Close() - - br := NewBackupStreamReader(r) - for { - hdr, err := br.Next() - if err == io.EOF { - break - } - if err != nil { - t.Fatal(err) - } - - t.Log(hdr) - } -} diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go index b37e930d..4051c1b3 100644 --- a/vendor/github.com/Microsoft/go-winio/ea.go +++ b/vendor/github.com/Microsoft/go-winio/ea.go @@ -1,137 +1,137 @@ -package winio - -import ( - "bytes" - "encoding/binary" - "errors" -) - -type fileFullEaInformation struct { - NextEntryOffset uint32 - Flags uint8 - NameLength uint8 - ValueLength uint16 -} - -var ( - fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) - - errInvalidEaBuffer = errors.New("invalid extended attribute buffer") - errEaNameTooLarge = errors.New("extended attribute name too large") - errEaValueTooLarge = errors.New("extended attribute value too large") -) - -// ExtendedAttribute represents a single Windows EA. -type ExtendedAttribute struct { - Name string - Value []byte - Flags uint8 -} - -func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { - var info fileFullEaInformation - err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) - if err != nil { - err = errInvalidEaBuffer - return - } - - nameOffset := fileFullEaInformationSize - nameLen := int(info.NameLength) - valueOffset := nameOffset + int(info.NameLength) + 1 - valueLen := int(info.ValueLength) - nextOffset := int(info.NextEntryOffset) - if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { - err = errInvalidEaBuffer - return - } - - ea.Name = string(b[nameOffset : nameOffset+nameLen]) - ea.Value = b[valueOffset : valueOffset+valueLen] - ea.Flags = info.Flags - if info.NextEntryOffset != 0 { - nb = b[info.NextEntryOffset:] - } - return -} - -// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION -// buffer retrieved from BackupRead, ZwQueryEaFile, etc. 
-func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { - for len(b) != 0 { - ea, nb, err := parseEa(b) - if err != nil { - return nil, err - } - - eas = append(eas, ea) - b = nb - } - return -} - -func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { - if int(uint8(len(ea.Name))) != len(ea.Name) { - return errEaNameTooLarge - } - if int(uint16(len(ea.Value))) != len(ea.Value) { - return errEaValueTooLarge - } - entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) - withPadding := (entrySize + 3) &^ 3 - nextOffset := uint32(0) - if !last { - nextOffset = withPadding - } - info := fileFullEaInformation{ - NextEntryOffset: nextOffset, - Flags: ea.Flags, - NameLength: uint8(len(ea.Name)), - ValueLength: uint16(len(ea.Value)), - } - - err := binary.Write(buf, binary.LittleEndian, &info) - if err != nil { - return err - } - - _, err = buf.Write([]byte(ea.Name)) - if err != nil { - return err - } - - err = buf.WriteByte(0) - if err != nil { - return err - } - - _, err = buf.Write(ea.Value) - if err != nil { - return err - } - - _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) - if err != nil { - return err - } - - return nil -} - -// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION -// buffer for use with BackupWrite, ZwSetEaFile, etc. -func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { - var buf bytes.Buffer - for i := range eas { - last := false - if i == len(eas)-1 { - last = true - } - - err := writeEa(&buf, &eas[i], last) - if err != nil { - return nil, err - } - } - return buf.Bytes(), nil -} +package winio + +import ( + "bytes" + "encoding/binary" + "errors" +) + +type fileFullEaInformation struct { + NextEntryOffset uint32 + Flags uint8 + NameLength uint8 + ValueLength uint16 +} + +var ( + fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) + + errInvalidEaBuffer = errors.New("invalid extended attribute buffer") + errEaNameTooLarge = errors.New("extended attribute name too large") + errEaValueTooLarge = errors.New("extended attribute value too large") +) + +// ExtendedAttribute represents a single Windows EA. +type ExtendedAttribute struct { + Name string + Value []byte + Flags uint8 +} + +func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { + var info fileFullEaInformation + err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) + if err != nil { + err = errInvalidEaBuffer + return + } + + nameOffset := fileFullEaInformationSize + nameLen := int(info.NameLength) + valueOffset := nameOffset + int(info.NameLength) + 1 + valueLen := int(info.ValueLength) + nextOffset := int(info.NextEntryOffset) + if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { + err = errInvalidEaBuffer + return + } + + ea.Name = string(b[nameOffset : nameOffset+nameLen]) + ea.Value = b[valueOffset : valueOffset+valueLen] + ea.Flags = info.Flags + if info.NextEntryOffset != 0 { + nb = b[info.NextEntryOffset:] + } + return +} + +// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION +// buffer retrieved from BackupRead, ZwQueryEaFile, etc. 
+func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { + for len(b) != 0 { + ea, nb, err := parseEa(b) + if err != nil { + return nil, err + } + + eas = append(eas, ea) + b = nb + } + return +} + +func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { + if int(uint8(len(ea.Name))) != len(ea.Name) { + return errEaNameTooLarge + } + if int(uint16(len(ea.Value))) != len(ea.Value) { + return errEaValueTooLarge + } + entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) + withPadding := (entrySize + 3) &^ 3 + nextOffset := uint32(0) + if !last { + nextOffset = withPadding + } + info := fileFullEaInformation{ + NextEntryOffset: nextOffset, + Flags: ea.Flags, + NameLength: uint8(len(ea.Name)), + ValueLength: uint16(len(ea.Value)), + } + + err := binary.Write(buf, binary.LittleEndian, &info) + if err != nil { + return err + } + + _, err = buf.Write([]byte(ea.Name)) + if err != nil { + return err + } + + err = buf.WriteByte(0) + if err != nil { + return err + } + + _, err = buf.Write(ea.Value) + if err != nil { + return err + } + + _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) + if err != nil { + return err + } + + return nil +} + +// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION +// buffer for use with BackupWrite, ZwSetEaFile, etc. +func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { + var buf bytes.Buffer + for i := range eas { + last := false + if i == len(eas)-1 { + last = true + } + + err := writeEa(&buf, &eas[i], last) + if err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/ea_test.go b/vendor/github.com/Microsoft/go-winio/ea_test.go deleted file mode 100644 index 92d9d457..00000000 --- a/vendor/github.com/Microsoft/go-winio/ea_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package winio - -import ( - "io/ioutil" - "os" - "reflect" - "syscall" - "testing" - "unsafe" -) - -var ( - testEas = []ExtendedAttribute{ - {Name: "foo", Value: []byte("bar")}, - {Name: "fizz", Value: []byte("buzz")}, - } - - testEasEncoded = []byte{16, 0, 0, 0, 0, 3, 3, 0, 102, 111, 111, 0, 98, 97, 114, 0, 0, 0, 0, 0, 0, 4, 4, 0, 102, 105, 122, 122, 0, 98, 117, 122, 122, 0, 0, 0} - testEasNotPadded = testEasEncoded[0 : len(testEasEncoded)-3] - testEasTruncated = testEasEncoded[0:20] -) - -func Test_RoundTripEas(t *testing.T) { - b, err := EncodeExtendedAttributes(testEas) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(testEasEncoded, b) { - t.Fatalf("encoded mismatch %v %v", testEasEncoded, b) - } - eas, err := DecodeExtendedAttributes(b) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(testEas, eas) { - t.Fatalf("mismatch %+v %+v", testEas, eas) - } -} - -func Test_EasDontNeedPaddingAtEnd(t *testing.T) { - eas, err := DecodeExtendedAttributes(testEasNotPadded) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(testEas, eas) { - t.Fatalf("mismatch %+v %+v", testEas, eas) - } -} - -func Test_TruncatedEasFailCorrectly(t *testing.T) { - _, err := DecodeExtendedAttributes(testEasTruncated) - if err == nil { - t.Fatal("expected error") - } -} - -func Test_NilEasEncodeAndDecodeAsNil(t *testing.T) { - b, err := EncodeExtendedAttributes(nil) - if err != nil { - t.Fatal(err) - } - if len(b) != 0 { - t.Fatal("expected empty") - } - eas, err := DecodeExtendedAttributes(nil) - if err != nil { - t.Fatal(err) - } - if len(eas) != 0 { - t.Fatal("expected empty") - } -} - -// Test_SetFileEa makes sure 
that the test buffer is actually parsable by NtSetEaFile. -func Test_SetFileEa(t *testing.T) { - f, err := ioutil.TempFile("", "winio") - if err != nil { - t.Fatal(err) - } - defer os.Remove(f.Name()) - defer f.Close() - ntdll := syscall.MustLoadDLL("ntdll.dll") - ntSetEaFile := ntdll.MustFindProc("NtSetEaFile") - var iosb [2]uintptr - r, _, _ := ntSetEaFile.Call(f.Fd(), uintptr(unsafe.Pointer(&iosb[0])), uintptr(unsafe.Pointer(&testEasEncoded[0])), uintptr(len(testEasEncoded))) - if r != 0 { - t.Fatalf("NtSetEaFile failed with %08x", r) - } -} diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go index b1d60abb..ada2fbab 100644 --- a/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -20,7 +20,8 @@ const ( // FileBasicInfo contains file access time and file attributes information. type FileBasicInfo struct { CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime - FileAttributes uintptr // includes padding + FileAttributes uint32 + pad uint32 // padding } // GetFileBasicInfo retrieves times and attributes for a file. diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go index 82cbe7af..d99eedb6 100644 --- a/vendor/github.com/Microsoft/go-winio/pipe.go +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -15,7 +15,6 @@ import ( //sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe //sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW //sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW -//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW //sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo //sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc @@ -121,6 +120,11 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) { // zero-byte message, ensure that all future Read() calls // also return EOF. f.readEOF = true + } else if err == syscall.ERROR_MORE_DATA { + // ERROR_MORE_DATA indicates that the pipe's read mode is message mode + // and the message still has more bytes. Treat this as a success, since + // this package presents all named pipes as byte streams. + err = nil } return n, err } @@ -134,12 +138,14 @@ func (s pipeAddress) String() string { } // DialPipe connects to a named pipe by path, timing out if the connection -// takes longer than the specified duration. If timeout is nil, then the timeout -// is the default timeout established by the pipe server. +// takes longer than the specified duration. If timeout is nil, then we use +// a default timeout of 5 seconds. (We do not use WaitNamedPipe.) 
func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { var absTimeout time.Time if timeout != nil { absTimeout = time.Now().Add(*timeout) + } else { + absTimeout = time.Now().Add(time.Second * 2) } var err error var h syscall.Handle @@ -148,22 +154,13 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { if err != cERROR_PIPE_BUSY { break } - now := time.Now() - var ms uint32 - if absTimeout.IsZero() { - ms = cNMPWAIT_USE_DEFAULT_WAIT - } else if now.After(absTimeout) { - ms = cNMPWAIT_NOWAIT - } else { - ms = uint32(absTimeout.Sub(now).Nanoseconds() / 1000 / 1000) - } - err = waitNamedPipe(path, ms) - if err != nil { - if err == cERROR_SEM_TIMEOUT { - return nil, ErrTimeout - } - break + if time.Now().After(absTimeout) { + return nil, ErrTimeout } + + // Wait 10 msec and try again. This is a rather simplistic + // view, as we always try each 10 milliseconds. + time.Sleep(time.Millisecond * 10) } if err != nil { return nil, &os.PathError{Op: "open", Path: path, Err: err} @@ -175,16 +172,6 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { return nil, err } - var state uint32 - err = getNamedPipeHandleState(h, &state, nil, nil, nil, nil, 0) - if err != nil { - return nil, err - } - - if state&cPIPE_READMODE_MESSAGE != 0 { - return nil, &os.PathError{Op: "open", Path: path, Err: errors.New("message readmode pipes not supported")} - } - f, err := makeWin32File(h) if err != nil { syscall.Close(h) @@ -354,13 +341,23 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { if err != nil { return nil, err } - // Immediately open and then close a client handle so that the named pipe is - // created but not currently accepting connections. + // Create a client handle and connect it. This results in the pipe + // instance always existing, so that clients see ERROR_PIPE_BUSY + // rather than ERROR_FILE_NOT_FOUND. This ties the first instance + // up so that no other instances can be used. This would have been + // cleaner if the Win32 API matched CreateFile with ConnectNamedPipe + // instead of CreateNamedPipe. (Apparently created named pipes are + // considered to be in listening state regardless of whether any + // active calls to ConnectNamedPipe are outstanding.) h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) if err != nil { syscall.Close(h) return nil, err } + // Close the client handle. The server side of the instance will + // still be busy, leading to ERROR_PIPE_BUSY instead of + // ERROR_NOT_FOUND, as long as we don't close the server handle, + // or disconnect the client with DisconnectNamedPipe. 
syscall.Close(h2) l := &win32PipeListener{ firstHandle: h, diff --git a/vendor/github.com/Microsoft/go-winio/pipe_test.go b/vendor/github.com/Microsoft/go-winio/pipe_test.go deleted file mode 100644 index c0d1a774..00000000 --- a/vendor/github.com/Microsoft/go-winio/pipe_test.go +++ /dev/null @@ -1,453 +0,0 @@ -package winio - -import ( - "bufio" - "io" - "net" - "os" - "syscall" - "testing" - "time" -) - -var testPipeName = `\\.\pipe\winiotestpipe` - -var aLongTimeAgo = time.Unix(1, 0) - -func TestDialUnknownFailsImmediately(t *testing.T) { - _, err := DialPipe(testPipeName, nil) - if err.(*os.PathError).Err != syscall.ENOENT { - t.Fatalf("expected ENOENT got %v", err) - } -} - -func TestDialListenerTimesOut(t *testing.T) { - l, err := ListenPipe(testPipeName, nil) - if err != nil { - t.Fatal(err) - } - defer l.Close() - var d = time.Duration(10 * time.Millisecond) - _, err = DialPipe(testPipeName, &d) - if err != ErrTimeout { - t.Fatalf("expected ErrTimeout, got %v", err) - } -} - -func TestDialAccessDeniedWithRestrictedSD(t *testing.T) { - c := PipeConfig{ - SecurityDescriptor: "D:P(A;;0x1200FF;;;WD)", - } - l, err := ListenPipe(testPipeName, &c) - if err != nil { - t.Fatal(err) - } - defer l.Close() - _, err = DialPipe(testPipeName, nil) - if err.(*os.PathError).Err != syscall.ERROR_ACCESS_DENIED { - t.Fatalf("expected ERROR_ACCESS_DENIED, got %v", err) - } -} - -func getConnection(cfg *PipeConfig) (client net.Conn, server net.Conn, err error) { - l, err := ListenPipe(testPipeName, cfg) - if err != nil { - return - } - defer l.Close() - - type response struct { - c net.Conn - err error - } - ch := make(chan response) - go func() { - c, err := l.Accept() - ch <- response{c, err} - }() - - c, err := DialPipe(testPipeName, nil) - if err != nil { - return - } - - r := <-ch - if err = r.err; err != nil { - c.Close() - return - } - - client = c - server = r.c - return -} - -func TestReadTimeout(t *testing.T) { - c, s, err := getConnection(nil) - if err != nil { - t.Fatal(err) - } - defer c.Close() - defer s.Close() - - c.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) - - buf := make([]byte, 10) - _, err = c.Read(buf) - if err != ErrTimeout { - t.Fatalf("expected ErrTimeout, got %v", err) - } -} - -func server(l net.Listener, ch chan int) { - c, err := l.Accept() - if err != nil { - panic(err) - } - rw := bufio.NewReadWriter(bufio.NewReader(c), bufio.NewWriter(c)) - s, err := rw.ReadString('\n') - if err != nil { - panic(err) - } - _, err = rw.WriteString("got " + s) - if err != nil { - panic(err) - } - err = rw.Flush() - if err != nil { - panic(err) - } - c.Close() - ch <- 1 -} - -func TestFullListenDialReadWrite(t *testing.T) { - l, err := ListenPipe(testPipeName, nil) - if err != nil { - t.Fatal(err) - } - defer l.Close() - - ch := make(chan int) - go server(l, ch) - - c, err := DialPipe(testPipeName, nil) - if err != nil { - t.Fatal(err) - } - defer c.Close() - - rw := bufio.NewReadWriter(bufio.NewReader(c), bufio.NewWriter(c)) - _, err = rw.WriteString("hello world\n") - if err != nil { - t.Fatal(err) - } - err = rw.Flush() - if err != nil { - t.Fatal(err) - } - - s, err := rw.ReadString('\n') - if err != nil { - t.Fatal(err) - } - ms := "got hello world\n" - if s != ms { - t.Errorf("expected '%s', got '%s'", ms, s) - } - - <-ch -} - -func TestCloseAbortsListen(t *testing.T) { - l, err := ListenPipe(testPipeName, nil) - if err != nil { - t.Fatal(err) - } - - ch := make(chan error) - go func() { - _, err := l.Accept() - ch <- err - }() - - time.Sleep(30 * time.Millisecond) - 
l.Close() - - err = <-ch - if err != ErrPipeListenerClosed { - t.Fatalf("expected ErrPipeListenerClosed, got %v", err) - } -} - -func ensureEOFOnClose(t *testing.T, r io.Reader, w io.Closer) { - b := make([]byte, 10) - w.Close() - n, err := r.Read(b) - if n > 0 { - t.Errorf("unexpected byte count %d", n) - } - if err != io.EOF { - t.Errorf("expected EOF: %v", err) - } -} - -func TestCloseClientEOFServer(t *testing.T) { - c, s, err := getConnection(nil) - if err != nil { - t.Fatal(err) - } - defer c.Close() - defer s.Close() - ensureEOFOnClose(t, c, s) -} - -func TestCloseServerEOFClient(t *testing.T) { - c, s, err := getConnection(nil) - if err != nil { - t.Fatal(err) - } - defer c.Close() - defer s.Close() - ensureEOFOnClose(t, s, c) -} - -func TestCloseWriteEOF(t *testing.T) { - cfg := &PipeConfig{ - MessageMode: true, - } - c, s, err := getConnection(cfg) - if err != nil { - t.Fatal(err) - } - defer c.Close() - defer s.Close() - - type closeWriter interface { - CloseWrite() error - } - - err = c.(closeWriter).CloseWrite() - if err != nil { - t.Fatal(err) - } - - b := make([]byte, 10) - _, err = s.Read(b) - if err != io.EOF { - t.Fatal(err) - } -} - -func TestAcceptAfterCloseFails(t *testing.T) { - l, err := ListenPipe(testPipeName, nil) - if err != nil { - t.Fatal(err) - } - l.Close() - _, err = l.Accept() - if err != ErrPipeListenerClosed { - t.Fatalf("expected ErrPipeListenerClosed, got %v", err) - } -} - -func TestDialTimesOutByDefault(t *testing.T) { - l, err := ListenPipe(testPipeName, nil) - if err != nil { - t.Fatal(err) - } - defer l.Close() - _, err = DialPipe(testPipeName, nil) - if err != ErrTimeout { - t.Fatalf("expected ErrTimeout, got %v", err) - } -} - -func TestTimeoutPendingRead(t *testing.T) { - l, err := ListenPipe(testPipeName, nil) - if err != nil { - t.Fatal(err) - } - defer l.Close() - - serverDone := make(chan struct{}) - - go func() { - s, err := l.Accept() - if err != nil { - t.Fatal(err) - } - time.Sleep(1 * time.Second) - s.Close() - close(serverDone) - }() - - client, err := DialPipe(testPipeName, nil) - if err != nil { - t.Fatal(err) - } - defer client.Close() - - clientErr := make(chan error) - go func() { - buf := make([]byte, 10) - _, err = client.Read(buf) - clientErr <- err - }() - - time.Sleep(100 * time.Millisecond) // make *sure* the pipe is reading before we set the deadline - client.SetReadDeadline(aLongTimeAgo) - - select { - case err = <-clientErr: - if err != ErrTimeout { - t.Fatalf("expected ErrTimeout, got %v", err) - } - case <-time.After(100 * time.Millisecond): - t.Fatalf("timed out while waiting for read to cancel") - <-clientErr - } - <-serverDone -} - -func TestTimeoutPendingWrite(t *testing.T) { - l, err := ListenPipe(testPipeName, nil) - if err != nil { - t.Fatal(err) - } - defer l.Close() - - serverDone := make(chan struct{}) - - go func() { - s, err := l.Accept() - if err != nil { - t.Fatal(err) - } - time.Sleep(1 * time.Second) - s.Close() - close(serverDone) - }() - - client, err := DialPipe(testPipeName, nil) - if err != nil { - t.Fatal(err) - } - defer client.Close() - - clientErr := make(chan error) - go func() { - _, err = client.Write([]byte("this should timeout")) - clientErr <- err - }() - - time.Sleep(100 * time.Millisecond) // make *sure* the pipe is writing before we set the deadline - client.SetWriteDeadline(aLongTimeAgo) - - select { - case err = <-clientErr: - if err != ErrTimeout { - t.Fatalf("expected ErrTimeout, got %v", err) - } - case <-time.After(100 * time.Millisecond): - t.Fatalf("timed out while waiting for 
write to cancel") - <-clientErr - } - <-serverDone -} - -type CloseWriter interface { - CloseWrite() error -} - -func TestEchoWithMessaging(t *testing.T) { - c := PipeConfig{ - MessageMode: true, // Use message mode so that CloseWrite() is supported - InputBufferSize: 65536, // Use 64KB buffers to improve performance - OutputBufferSize: 65536, - } - l, err := ListenPipe(testPipeName, &c) - if err != nil { - t.Fatal(err) - } - defer l.Close() - - listenerDone := make(chan bool) - clientDone := make(chan bool) - go func() { - // server echo - conn, e := l.Accept() - if e != nil { - t.Fatal(e) - } - defer conn.Close() - - time.Sleep(500 * time.Millisecond) // make *sure* we don't begin to read before eof signal is sent - io.Copy(conn, conn) - conn.(CloseWriter).CloseWrite() - close(listenerDone) - }() - timeout := 1 * time.Second - client, err := DialPipe(testPipeName, &timeout) - if err != nil { - t.Fatal(err) - } - defer client.Close() - - go func() { - // client read back - bytes := make([]byte, 2) - n, e := client.Read(bytes) - if e != nil { - t.Fatal(e) - } - if n != 2 { - t.Fatalf("expected 2 bytes, got %v", n) - } - close(clientDone) - }() - - payload := make([]byte, 2) - payload[0] = 0 - payload[1] = 1 - - n, err := client.Write(payload) - if err != nil { - t.Fatal(err) - } - if n != 2 { - t.Fatalf("expected 2 bytes, got %v", n) - } - client.(CloseWriter).CloseWrite() - <-listenerDone - <-clientDone -} - -func TestConnectRace(t *testing.T) { - l, err := ListenPipe(testPipeName, nil) - if err != nil { - t.Fatal(err) - } - defer l.Close() - go func() { - for { - s, err := l.Accept() - if err == ErrPipeListenerClosed { - return - } - - if err != nil { - t.Fatal(err) - } - s.Close() - } - }() - - for i := 0; i < 1000; i++ { - c, err := DialPipe(testPipeName, nil) - if err != nil { - t.Fatal(err) - } - c.Close() - } -} diff --git a/vendor/github.com/Microsoft/go-winio/privileges_test.go b/vendor/github.com/Microsoft/go-winio/privileges_test.go deleted file mode 100644 index 5e94c48c..00000000 --- a/vendor/github.com/Microsoft/go-winio/privileges_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package winio - -import "testing" - -func TestRunWithUnavailablePrivilege(t *testing.T) { - err := RunWithPrivilege("SeCreateTokenPrivilege", func() error { return nil }) - if _, ok := err.(*PrivilegeError); err == nil || !ok { - t.Fatal("expected PrivilegeError") - } -} - -func TestRunWithPrivileges(t *testing.T) { - err := RunWithPrivilege("SeShutdownPrivilege", func() error { return nil }) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/Microsoft/go-winio/sd_test.go b/vendor/github.com/Microsoft/go-winio/sd_test.go deleted file mode 100644 index 847db3c1..00000000 --- a/vendor/github.com/Microsoft/go-winio/sd_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package winio - -import "testing" - -func TestLookupInvalidSid(t *testing.T) { - _, err := LookupSidByName(".\\weoifjdsklfj") - aerr, ok := err.(*AccountLookupError) - if !ok || aerr.Err != cERROR_NONE_MAPPED { - t.Fatalf("expected AccountLookupError with ERROR_NONE_MAPPED, got %s", err) - } -} - -func TestLookupValidSid(t *testing.T) { - sid, err := LookupSidByName("Everyone") - if err != nil || sid != "S-1-1-0" { - t.Fatal("expected S-1-1-0, got %s, %s", sid, err) - } -} - -func TestLookupEmptyNameFails(t *testing.T) { - _, err := LookupSidByName(".\\weoifjdsklfj") - aerr, ok := err.(*AccountLookupError) - if !ok || aerr.Err != cERROR_NONE_MAPPED { - t.Fatalf("expected AccountLookupError with ERROR_NONE_MAPPED, got %s", err) - } -} diff 
--git a/vendor/github.com/Nvveen/Gotty/BUILD.bazel b/vendor/github.com/Nvveen/Gotty/BUILD.bazel deleted file mode 100644 index 68e67e14..00000000 --- a/vendor/github.com/Nvveen/Gotty/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "attributes.go", - "gotty.go", - "parser.go", - "types.go", - ], - importpath = "github.com/Nvveen/Gotty", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/Nvveen/Gotty/README b/vendor/github.com/Nvveen/Gotty/README deleted file mode 100644 index a6b0d9a8..00000000 --- a/vendor/github.com/Nvveen/Gotty/README +++ /dev/null @@ -1,5 +0,0 @@ -Gotty is a library written in Go that determines and reads termcap database -files to produce an interface for interacting with the capabilities of a -terminal. -See the godoc documentation or the source code for more information about -function usage. diff --git a/vendor/github.com/Nvveen/Gotty/TODO b/vendor/github.com/Nvveen/Gotty/TODO deleted file mode 100644 index 47046053..00000000 --- a/vendor/github.com/Nvveen/Gotty/TODO +++ /dev/null @@ -1,3 +0,0 @@ -gotty.go:// TODO add more concurrency to name lookup, look for more opportunities. -all:// TODO add more documentation, with function usage in a doc.go file. -all:// TODO add more testing/benchmarking with go test. diff --git a/vendor/github.com/containerd/continuity/.gitignore b/vendor/github.com/containerd/continuity/.gitignore deleted file mode 100644 index 6921662d..00000000 --- a/vendor/github.com/containerd/continuity/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test -bin - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/containerd/continuity/.mailmap b/vendor/github.com/containerd/continuity/.mailmap deleted file mode 100644 index f48ae41a..00000000 --- a/vendor/github.com/containerd/continuity/.mailmap +++ /dev/null @@ -1 +0,0 @@ -Stephen J Day Stephen Day diff --git a/vendor/github.com/containerd/continuity/.travis.yml b/vendor/github.com/containerd/continuity/.travis.yml deleted file mode 100644 index 4ddbe28b..00000000 --- a/vendor/github.com/containerd/continuity/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: go -sudo: required - -go: - - 1.8.x - - 1.9.x - - tip - -go_import_path: github.com/containerd/continuity - -env: -# NOTE: we cannot set GOOS directly (because gimme overrides the value) - - TRAVIS_GOOS=windows - - TRAVIS_GOOS=linux - - TRAVIS_GOOS=darwin - -script: - - export GOOS=${TRAVIS_GOOS} - - make build binaries - - if [ "$GOOS" = "linux" ]; then make vet test; fi - - if [ "$GOOS" != "linux" ]; then make test-compile; fi diff --git a/vendor/github.com/containerd/continuity/BUILD.bazel b/vendor/github.com/containerd/continuity/BUILD.bazel deleted file mode 100644 index b5fb33e9..00000000 --- a/vendor/github.com/containerd/continuity/BUILD.bazel +++ /dev/null @@ -1,124 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "context.go", - "digests.go", - "groups_unix.go", - "hardlinks.go", - "ioutils.go", - "manifest.go", - "resource.go", - ] + select({ - "@io_bazel_rules_go//go/platform:darwin": [ - "hardlinks_unix.go", - "resource_unix.go", - ], - 
"@io_bazel_rules_go//go/platform:freebsd": [ - "hardlinks_unix.go", - "resource_unix.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "hardlinks_unix.go", - "resource_unix.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "hardlinks_unix.go", - "resource_unix.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "hardlinks_windows.go", - "resource_windows.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/containerd/continuity", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/containerd/continuity/devices:go_default_library", - "//vendor/github.com/containerd/continuity/driver:go_default_library", - "//vendor/github.com/containerd/continuity/pathdriver:go_default_library", - "//vendor/github.com/containerd/continuity/proto:go_default_library", - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/opencontainers/go-digest:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "digests_test.go", - "resource_test.go", - "testutil_test.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "manifest_test.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "manifest_test.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "manifest_test.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "manifest_test.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "manifest_test.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "manifest_test.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "manifest_test.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "manifest_test.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "manifest_test.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "manifest_test.go", - ], - "//conditions:default": [], - }), - embed = [":go_default_library"], - importpath = "github.com/containerd/continuity", - deps = [ - "//vendor/github.com/opencontainers/go-digest:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "//vendor/github.com/containerd/continuity/devices:go_default_library", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "//vendor/github.com/containerd/continuity/devices:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//vendor/github.com/containerd/continuity/devices:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/github.com/containerd/continuity/devices:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/containerd/continuity/devices:go_default_library", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "//vendor/github.com/containerd/continuity/devices:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//vendor/github.com/containerd/continuity/devices:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//vendor/github.com/containerd/continuity/devices:go_default_library", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "//vendor/github.com/containerd/continuity/devices:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "//vendor/github.com/containerd/continuity/devices:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/vendor/github.com/containerd/continuity/Makefile b/vendor/github.com/containerd/continuity/Makefile deleted file mode 100644 index 35fa6bd8..00000000 --- 
a/vendor/github.com/containerd/continuity/Makefile +++ /dev/null @@ -1,68 +0,0 @@ -# Set an output prefix, which is the local directory if not specified -PREFIX?=$(shell pwd) - -# Used to populate version variable in main package. -VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) - -GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" - -PACKAGES=$(shell go list ./... | grep -v /vendor/) - -.PHONY: clean all fmt vet lint build test binaries setup -.DEFAULT: default -# skip lint at the moment -all: AUTHORS clean fmt vet fmt build test binaries - -AUTHORS: .mailmap .git/HEAD - git log --format='%aN <%aE>' | sort -fu > $@ - -# This only needs to be generated by hand when cutting full releases. -version/version.go: - ./version/version.sh > $@ - -${PREFIX}/bin/continuity: version/version.go $(shell find . -type f -name '*.go') - @echo "+ $@" - @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/continuity - -setup: - @echo "+ $@" - @go get -u github.com/golang/lint/golint - -generate: - go generate $(PACKAGES) - -# Depends on binaries because vet will silently fail if it can't load compiled -# imports -vet: binaries - @echo "+ $@" - @go vet $(PACKAGES) - -fmt: - @echo "+ $@" - @test -z "$$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | grep -v vendor/ | tee /dev/stderr)" || \ - echo "+ please format Go code with 'gofmt -s'" - -lint: - @echo "+ $@" - @test -z "$$(golint $(PACKAGES) | grep -v Godeps/_workspace/src/ | grep -v vendor/ |tee /dev/stderr)" - -build: - @echo "+ $@" - @go build -v ${GO_LDFLAGS} $(PACKAGES) - -test: - @echo "+ $@" - @go test $(PACKAGES) - -test-compile: - @echo "+ $@" - @for pkg in $(PACKAGES); do go test -c $$pkg; done - -binaries: ${PREFIX}/bin/continuity - @echo "+ $@" - @if [ x$$GOOS = xwindows ]; then echo "+ continuity -> continuity.exe"; mv ${PREFIX}/bin/continuity ${PREFIX}/bin/continuity.exe; fi - -clean: - @echo "+ $@" - @rm -rf "${PREFIX}/bin/continuity" "${PREFIX}/bin/continuity.exe" - diff --git a/vendor/github.com/containerd/continuity/README.md b/vendor/github.com/containerd/continuity/README.md deleted file mode 100644 index 0e91ce07..00000000 --- a/vendor/github.com/containerd/continuity/README.md +++ /dev/null @@ -1,74 +0,0 @@ -# continuity - -[![GoDoc](https://godoc.org/github.com/containerd/continuity?status.svg)](https://godoc.org/github.com/containerd/continuity) -[![Build Status](https://travis-ci.org/containerd/continuity.svg?branch=master)](https://travis-ci.org/containerd/continuity) - -A transport-agnostic, filesystem metadata manifest system - -This project is a staging area for experiments in providing transport agnostic -metadata storage. - -Please see https://github.com/opencontainers/specs/issues/11 for more details. - -## Manifest Format - -A continuity manifest encodes filesystem metadata in Protocol Buffers. -Please refer to [proto/manifest.proto](proto/manifest.proto). - -## Usage - -Build: - -```console -$ make -``` - -Create a manifest (of this repo itself): - -```console -$ ./bin/continuity build . > /tmp/a.pb -``` - -Dump a manifest: - -```console -$ ./bin/continuity ls /tmp/a.pb -... --rw-rw-r-- 270 B /.gitignore --rw-rw-r-- 88 B /.mailmap --rw-rw-r-- 187 B /.travis.yml --rw-rw-r-- 359 B /AUTHORS --rw-rw-r-- 11 kB /LICENSE --rw-rw-r-- 1.5 kB /Makefile -... --rw-rw-r-- 986 B /testutil_test.go -drwxrwxr-x 0 B /version --rw-rw-r-- 478 B /version/version.go -``` - -Verify a manifest: - -```console -$ ./bin/continuity verify . 
/tmp/a.pb -``` - -Break the directory and restore using the manifest: -```console -$ chmod 777 Makefile -$ ./bin/continuity verify . /tmp/a.pb -2017/06/23 08:00:34 error verifying manifest: resource "/Makefile" has incorrect mode: -rwxrwxrwx != -rw-rw-r-- -$ ./bin/continuity apply . /tmp/a.pb -$ stat -c %a Makefile -664 -$ ./bin/continuity verify . /tmp/a.pb -``` - - -## Contribution Guide -### Building Proto Package - -If you change the proto file you will need to rebuild the generated Go with `go generate`. - -```console -$ go generate ./proto -``` diff --git a/vendor/github.com/containerd/continuity/context.go b/vendor/github.com/containerd/continuity/context.go deleted file mode 100644 index ec21cf4f..00000000 --- a/vendor/github.com/containerd/continuity/context.go +++ /dev/null @@ -1,657 +0,0 @@ -package continuity - -import ( - "bytes" - "fmt" - "io" - "log" - "os" - "path/filepath" - "strings" - - "github.com/containerd/continuity/devices" - driverpkg "github.com/containerd/continuity/driver" - "github.com/containerd/continuity/pathdriver" - "github.com/opencontainers/go-digest" -) - -var ( - ErrNotFound = fmt.Errorf("not found") - ErrNotSupported = fmt.Errorf("not supported") -) - -// Context represents a file system context for accessing resources. The -// responsibility of the context is to convert system specific resources to -// generic Resource objects. Most of this is safe path manipulation, as well -// as extraction of resource details. -type Context interface { - Apply(Resource) error - Verify(Resource) error - Resource(string, os.FileInfo) (Resource, error) - Walk(filepath.WalkFunc) error -} - -// SymlinkPath is intended to give the symlink target value -// in a root context. Target and linkname are absolute paths -// not under the given root. -type SymlinkPath func(root, linkname, target string) (string, error) - -type ContextOptions struct { - Digester Digester - Driver driverpkg.Driver - PathDriver pathdriver.PathDriver - Provider ContentProvider -} - -// context represents a file system context for accessing resources. -// Generally, all path qualified access and system considerations should land -// here. -type context struct { - driver driverpkg.Driver - pathDriver pathdriver.PathDriver - root string - digester Digester - provider ContentProvider -} - -// NewContext returns a Context associated with root. The default driver will -// be used, as returned by NewDriver. -func NewContext(root string) (Context, error) { - return NewContextWithOptions(root, ContextOptions{}) -} - -// NewContextWithOptions returns a Context associate with the root. -func NewContextWithOptions(root string, options ContextOptions) (Context, error) { - // normalize to absolute path - pathDriver := options.PathDriver - if pathDriver == nil { - pathDriver = pathdriver.LocalPathDriver - } - - root = pathDriver.FromSlash(root) - root, err := pathDriver.Abs(pathDriver.Clean(root)) - if err != nil { - return nil, err - } - - driver := options.Driver - if driver == nil { - driver, err = driverpkg.NewSystemDriver() - if err != nil { - return nil, err - } - } - - digester := options.Digester - if digester == nil { - digester = simpleDigester{digest.Canonical} - } - - // Check the root directory. Need to be a little careful here. We are - // allowing a link for now, but this may have odd behavior when - // canonicalizing paths. As long as all files are opened through the link - // path, this should be okay. 
- fi, err := driver.Stat(root) - if err != nil { - return nil, err - } - - if !fi.IsDir() { - return nil, &os.PathError{Op: "NewContext", Path: root, Err: os.ErrInvalid} - } - - return &context{ - root: root, - driver: driver, - pathDriver: pathDriver, - digester: digester, - provider: options.Provider, - }, nil -} - -// Resource returns the resource as path p, populating the entry with info -// from fi. The path p should be the path of the resource in the context, -// typically obtained through Walk or from the value of Resource.Path(). If fi -// is nil, it will be resolved. -func (c *context) Resource(p string, fi os.FileInfo) (Resource, error) { - fp, err := c.fullpath(p) - if err != nil { - return nil, err - } - - if fi == nil { - fi, err = c.driver.Lstat(fp) - if err != nil { - return nil, err - } - } - - base, err := newBaseResource(p, fi) - if err != nil { - return nil, err - } - - base.xattrs, err = c.resolveXAttrs(fp, fi, base) - if err == ErrNotSupported { - log.Printf("resolving xattrs on %s not supported", fp) - } else if err != nil { - return nil, err - } - - // TODO(stevvooe): Handle windows alternate data streams. - - if fi.Mode().IsRegular() { - dgst, err := c.digest(p) - if err != nil { - return nil, err - } - - return newRegularFile(*base, base.paths, fi.Size(), dgst) - } - - if fi.Mode().IsDir() { - return newDirectory(*base) - } - - if fi.Mode()&os.ModeSymlink != 0 { - // We handle relative links vs absolute links by including a - // beginning slash for absolute links. Effectively, the bundle's - // root is treated as the absolute link anchor. - target, err := c.driver.Readlink(fp) - if err != nil { - return nil, err - } - - return newSymLink(*base, target) - } - - if fi.Mode()&os.ModeNamedPipe != 0 { - return newNamedPipe(*base, base.paths) - } - - if fi.Mode()&os.ModeDevice != 0 { - deviceDriver, ok := c.driver.(driverpkg.DeviceInfoDriver) - if !ok { - log.Printf("device extraction not supported %s", fp) - return nil, ErrNotSupported - } - - // character and block devices merely need to recover the - // major/minor device number. - major, minor, err := deviceDriver.DeviceInfo(fi) - if err != nil { - return nil, err - } - - return newDevice(*base, base.paths, major, minor) - } - - log.Printf("%q (%v) is not supported", fp, fi.Mode()) - return nil, ErrNotFound -} - -func (c *context) verifyMetadata(resource, target Resource) error { - if target.Mode() != resource.Mode() { - return fmt.Errorf("resource %q has incorrect mode: %v != %v", target.Path(), target.Mode(), resource.Mode()) - } - - if target.UID() != resource.UID() { - return fmt.Errorf("unexpected uid for %q: %v != %v", target.Path(), target.UID(), resource.GID()) - } - - if target.GID() != resource.GID() { - return fmt.Errorf("unexpected gid for %q: %v != %v", target.Path(), target.GID(), target.GID()) - } - - if xattrer, ok := resource.(XAttrer); ok { - txattrer, tok := target.(XAttrer) - if !tok { - return fmt.Errorf("resource %q has xattrs but target does not support them", resource.Path()) - } - - // For xattrs, only ensure that we have those defined in the resource - // and their values match. We can ignore other xattrs. In other words, - // we only verify that target has the subset defined by resource. 
- txattrs := txattrer.XAttrs() - for attr, value := range xattrer.XAttrs() { - tvalue, ok := txattrs[attr] - if !ok { - return fmt.Errorf("resource %q target missing xattr %q", resource.Path(), attr) - } - - if !bytes.Equal(value, tvalue) { - return fmt.Errorf("xattr %q value differs for resource %q", attr, resource.Path()) - } - } - } - - switch r := resource.(type) { - case RegularFile: - // TODO(stevvooe): Another reason to use a record-based approach. We - // have to do another type switch to get this to work. This could be - // fixed with an Equal function, but let's study this a little more to - // be sure. - t, ok := target.(RegularFile) - if !ok { - return fmt.Errorf("resource %q target not a regular file", r.Path()) - } - - if t.Size() != r.Size() { - return fmt.Errorf("resource %q target has incorrect size: %v != %v", t.Path(), t.Size(), r.Size()) - } - case Directory: - t, ok := target.(Directory) - if !ok { - return fmt.Errorf("resource %q target not a directory", t.Path()) - } - case SymLink: - t, ok := target.(SymLink) - if !ok { - return fmt.Errorf("resource %q target not a symlink", t.Path()) - } - - if t.Target() != r.Target() { - return fmt.Errorf("resource %q target has mismatched target: %q != %q", t.Path(), t.Target(), r.Target()) - } - case Device: - t, ok := target.(Device) - if !ok { - return fmt.Errorf("resource %q is not a device", t.Path()) - } - - if t.Major() != r.Major() || t.Minor() != r.Minor() { - return fmt.Errorf("resource %q has mismatched major/minor numbers: %d,%d != %d,%d", t.Path(), t.Major(), t.Minor(), r.Major(), r.Minor()) - } - case NamedPipe: - t, ok := target.(NamedPipe) - if !ok { - return fmt.Errorf("resource %q is not a named pipe", t.Path()) - } - default: - return fmt.Errorf("cannot verify resource: %v", resource) - } - - return nil -} - -// Verify the resource in the context. An error will be returned a discrepancy -// is found. -func (c *context) Verify(resource Resource) error { - fp, err := c.fullpath(resource.Path()) - if err != nil { - return err - } - - fi, err := c.driver.Lstat(fp) - if err != nil { - return err - } - - target, err := c.Resource(resource.Path(), fi) - if err != nil { - return err - } - - if target.Path() != resource.Path() { - return fmt.Errorf("resource paths do not match: %q != %q", target.Path(), resource.Path()) - } - - if err := c.verifyMetadata(resource, target); err != nil { - return err - } - - if h, isHardlinkable := resource.(Hardlinkable); isHardlinkable { - hardlinkKey, err := newHardlinkKey(fi) - if err == errNotAHardLink { - if len(h.Paths()) > 1 { - return fmt.Errorf("%q is not a hardlink to %q", h.Paths()[1], resource.Path()) - } - } else if err != nil { - return err - } - - for _, path := range h.Paths()[1:] { - fpLink, err := c.fullpath(path) - if err != nil { - return err - } - - fiLink, err := c.driver.Lstat(fpLink) - if err != nil { - return err - } - - targetLink, err := c.Resource(path, fiLink) - if err != nil { - return err - } - - hardlinkKeyLink, err := newHardlinkKey(fiLink) - if err != nil { - return err - } - - if hardlinkKeyLink != hardlinkKey { - return fmt.Errorf("%q is not a hardlink to %q", path, resource.Path()) - } - - if err := c.verifyMetadata(resource, targetLink); err != nil { - return err - } - } - } - - switch r := resource.(type) { - case RegularFile: - t, ok := target.(RegularFile) - if !ok { - return fmt.Errorf("resource %q target not a regular file", r.Path()) - } - - // TODO(stevvooe): This may need to get a little more sophisticated - // for digest comparison. 
We may want to actually calculate the - // provided digests, rather than the implementations having an - // overlap. - if !digestsMatch(t.Digests(), r.Digests()) { - return fmt.Errorf("digests for resource %q do not match: %v != %v", t.Path(), t.Digests(), r.Digests()) - } - } - - return nil -} - -func (c *context) checkoutFile(fp string, rf RegularFile) error { - if c.provider == nil { - return fmt.Errorf("no file provider") - } - var ( - r io.ReadCloser - err error - ) - for _, dgst := range rf.Digests() { - r, err = c.provider.Reader(dgst) - if err == nil { - break - } - } - if err != nil { - return fmt.Errorf("file content could not be provided: %v", err) - } - defer r.Close() - - return atomicWriteFile(fp, r, rf) -} - -// Apply the resource to the contexts. An error will be returned if the -// operation fails. Depending on the resource type, the resource may be -// created. For resource that cannot be resolved, an error will be returned. -func (c *context) Apply(resource Resource) error { - fp, err := c.fullpath(resource.Path()) - if err != nil { - return err - } - - if !strings.HasPrefix(fp, c.root) { - return fmt.Errorf("resource %v escapes root", resource) - } - - var chmod = true - fi, err := c.driver.Lstat(fp) - if err != nil { - if !os.IsNotExist(err) { - return err - } - } - - switch r := resource.(type) { - case RegularFile: - if fi == nil { - if err := c.checkoutFile(fp, r); err != nil { - return fmt.Errorf("error checking out file %q: %v", resource.Path(), err) - } - chmod = false - } else { - if !fi.Mode().IsRegular() { - return fmt.Errorf("file %q should be a regular file, but is not", resource.Path()) - } - if fi.Size() != r.Size() { - if err := c.checkoutFile(fp, r); err != nil { - return fmt.Errorf("error checking out file %q: %v", resource.Path(), err) - } - } else { - for _, dgst := range r.Digests() { - f, err := os.Open(fp) - if err != nil { - return fmt.Errorf("failure opening file for read %q: %v", resource.Path(), err) - } - compared, err := dgst.Algorithm().FromReader(f) - if err == nil && dgst != compared { - if err := c.checkoutFile(fp, r); err != nil { - return fmt.Errorf("error checking out file %q: %v", resource.Path(), err) - } - break - } - if err1 := f.Close(); err == nil { - err = err1 - } - if err != nil { - return fmt.Errorf("error checking digest for %q: %v", resource.Path(), err) - } - } - } - } - case Directory: - if fi == nil { - if err := c.driver.Mkdir(fp, resource.Mode()); err != nil { - return err - } - } else if !fi.Mode().IsDir() { - return fmt.Errorf("%q should be a directory, but is not", resource.Path()) - } - - case SymLink: - var target string // only possibly set if target resource is a symlink - - if fi != nil { - if fi.Mode()&os.ModeSymlink != 0 { - target, err = c.driver.Readlink(fp) - if err != nil { - return err - } - } - } - - if target != r.Target() { - if fi != nil { - if err := c.driver.Remove(fp); err != nil { // RemoveAll in case of directory? - return err - } - } - - if err := c.driver.Symlink(r.Target(), fp); err != nil { - return err - } - } - - // NOTE(stevvooe): Chmod on symlink is not supported on linux. We - // may want to maintain support for other platforms that have it. 
- chmod = false - - case Device: - if fi == nil { - if err := c.driver.Mknod(fp, resource.Mode(), int(r.Major()), int(r.Minor())); err != nil { - return err - } - } else if (fi.Mode() & os.ModeDevice) == 0 { - return fmt.Errorf("%q should be a device, but is not", resource.Path()) - } else { - major, minor, err := devices.DeviceInfo(fi) - if err != nil { - return err - } - if major != r.Major() || minor != r.Minor() { - if err := c.driver.Remove(fp); err != nil { - return err - } - - if err := c.driver.Mknod(fp, resource.Mode(), int(r.Major()), int(r.Minor())); err != nil { - return err - } - } - } - - case NamedPipe: - if fi == nil { - if err := c.driver.Mkfifo(fp, resource.Mode()); err != nil { - return err - } - } else if (fi.Mode() & os.ModeNamedPipe) == 0 { - return fmt.Errorf("%q should be a named pipe, but is not", resource.Path()) - } - } - - if h, isHardlinkable := resource.(Hardlinkable); isHardlinkable { - for _, path := range h.Paths() { - if path == resource.Path() { - continue - } - - lp, err := c.fullpath(path) - if err != nil { - return err - } - - if _, fi := c.driver.Lstat(lp); fi == nil { - c.driver.Remove(lp) - } - if err := c.driver.Link(fp, lp); err != nil { - return err - } - } - } - - // Update filemode if file was not created - if chmod { - if err := c.driver.Lchmod(fp, resource.Mode()); err != nil { - return err - } - } - - if err := c.driver.Lchown(fp, resource.UID(), resource.GID()); err != nil { - return err - } - - if xattrer, ok := resource.(XAttrer); ok { - // For xattrs, only ensure that we have those defined in the resource - // and their values are set. We can ignore other xattrs. In other words, - // we only set xattres defined by resource but never remove. - - if _, ok := resource.(SymLink); ok { - lxattrDriver, ok := c.driver.(driverpkg.LXAttrDriver) - if !ok { - return fmt.Errorf("unsupported symlink xattr for resource %q", resource.Path()) - } - if err := lxattrDriver.LSetxattr(fp, xattrer.XAttrs()); err != nil { - return err - } - } else { - xattrDriver, ok := c.driver.(driverpkg.XAttrDriver) - if !ok { - return fmt.Errorf("unsupported xattr for resource %q", resource.Path()) - } - if err := xattrDriver.Setxattr(fp, xattrer.XAttrs()); err != nil { - return err - } - } - } - - return nil -} - -// Walk provides a convenience function to call filepath.Walk correctly for -// the context. Otherwise identical to filepath.Walk, the path argument is -// corrected to be contained within the context. -func (c *context) Walk(fn filepath.WalkFunc) error { - root := c.root - fi, err := c.driver.Lstat(c.root) - if err == nil && fi.Mode()&os.ModeSymlink != 0 { - root, err = c.driver.Readlink(c.root) - if err != nil { - return err - } - } - return c.pathDriver.Walk(root, func(p string, fi os.FileInfo, err error) error { - contained, err := c.containWithRoot(p, root) - return fn(contained, fi, err) - }) -} - -// fullpath returns the system path for the resource, joined with the context -// root. The path p must be a part of the context. -func (c *context) fullpath(p string) (string, error) { - p = c.pathDriver.Join(c.root, p) - if !strings.HasPrefix(p, c.root) { - return "", fmt.Errorf("invalid context path") - } - - return p, nil -} - -// contain cleans and santizes the filesystem path p to be an absolute path, -// effectively relative to the context root. 
-func (c *context) contain(p string) (string, error) { - return c.containWithRoot(p, c.root) -} - -// containWithRoot cleans and santizes the filesystem path p to be an absolute path, -// effectively relative to the passed root. Extra care should be used when calling this -// instead of contain. This is needed for Walk, as if context root is a symlink, -// it must be evaluated prior to the Walk -func (c *context) containWithRoot(p string, root string) (string, error) { - sanitized, err := c.pathDriver.Rel(root, p) - if err != nil { - return "", err - } - - // ZOMBIES(stevvooe): In certain cases, we may want to remap these to a - // "containment error", so the caller can decide what to do. - return c.pathDriver.Join("/", c.pathDriver.Clean(sanitized)), nil -} - -// digest returns the digest of the file at path p, relative to the root. -func (c *context) digest(p string) (digest.Digest, error) { - f, err := c.driver.Open(c.pathDriver.Join(c.root, p)) - if err != nil { - return "", err - } - defer f.Close() - - return c.digester.Digest(f) -} - -// resolveXAttrs attempts to resolve the extended attributes for the resource -// at the path fp, which is the full path to the resource. If the resource -// cannot have xattrs, nil will be returned. -func (c *context) resolveXAttrs(fp string, fi os.FileInfo, base *resource) (map[string][]byte, error) { - if fi.Mode().IsRegular() || fi.Mode().IsDir() { - xattrDriver, ok := c.driver.(driverpkg.XAttrDriver) - if !ok { - log.Println("xattr extraction not supported") - return nil, ErrNotSupported - } - - return xattrDriver.Getxattr(fp) - } - - if fi.Mode()&os.ModeSymlink != 0 { - lxattrDriver, ok := c.driver.(driverpkg.LXAttrDriver) - if !ok { - log.Println("xattr extraction for symlinks not supported") - return nil, ErrNotSupported - } - - return lxattrDriver.LGetxattr(fp) - } - - return nil, nil -} diff --git a/vendor/github.com/containerd/continuity/digests.go b/vendor/github.com/containerd/continuity/digests.go deleted file mode 100644 index 355b0803..00000000 --- a/vendor/github.com/containerd/continuity/digests.go +++ /dev/null @@ -1,88 +0,0 @@ -package continuity - -import ( - "fmt" - "io" - "sort" - - "github.com/opencontainers/go-digest" -) - -// Digester produces a digest for a given read stream -type Digester interface { - Digest(io.Reader) (digest.Digest, error) -} - -// ContentProvider produces a read stream for a given digest -type ContentProvider interface { - Reader(digest.Digest) (io.ReadCloser, error) -} - -type simpleDigester struct { - algorithm digest.Algorithm -} - -func (sd simpleDigester) Digest(r io.Reader) (digest.Digest, error) { - digester := sd.algorithm.Digester() - - if _, err := io.Copy(digester.Hash(), r); err != nil { - return "", err - } - - return digester.Digest(), nil -} - -// uniqifyDigests sorts and uniqifies the provided digest, ensuring that the -// digests are not repeated and no two digests with the same algorithm have -// different values. Because a stable sort is used, this has the effect of -// "zipping" digest collections from multiple resources. -func uniqifyDigests(digests ...digest.Digest) ([]digest.Digest, error) { - sort.Stable(digestSlice(digests)) // stable sort is important for the behavior here. - seen := map[digest.Digest]struct{}{} - algs := map[digest.Algorithm][]digest.Digest{} // detect different digests. 
- - var out []digest.Digest - // uniqify the digests - for _, d := range digests { - if _, ok := seen[d]; ok { - continue - } - - seen[d] = struct{}{} - algs[d.Algorithm()] = append(algs[d.Algorithm()], d) - - if len(algs[d.Algorithm()]) > 1 { - return nil, fmt.Errorf("conflicting digests for %v found", d.Algorithm()) - } - - out = append(out, d) - } - - return out, nil -} - -// digestsMatch compares the two sets of digests to see if they match. -func digestsMatch(as, bs []digest.Digest) bool { - all := append(as, bs...) - - uniqified, err := uniqifyDigests(all...) - if err != nil { - // the only error uniqifyDigests returns is when the digests disagree. - return false - } - - disjoint := len(as) + len(bs) - if len(uniqified) == disjoint { - // if these two sets have the same cardinality, we know both sides - // didn't share any digests. - return false - } - - return true -} - -type digestSlice []digest.Digest - -func (p digestSlice) Len() int { return len(p) } -func (p digestSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p digestSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/containerd/continuity/digests_test.go b/vendor/github.com/containerd/continuity/digests_test.go deleted file mode 100644 index be7a497d..00000000 --- a/vendor/github.com/containerd/continuity/digests_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package continuity - -import ( - "fmt" - "reflect" - "testing" - - "github.com/opencontainers/go-digest" -) - -// TestUniqifyDigests ensures that we deterministically sort digest entries -// for a resource. -func TestUniqifyDigests(t *testing.T) { - for _, testcase := range []struct { - description string - input [][]digest.Digest // input is a series of digest collections from separate resources. - expected []digest.Digest - err error - }{ - { - description: "simple merge", - input: [][]digest.Digest{ - {"sha1:abc", "sha256:def"}, - {"sha1:abc", "sha256:def"}, - }, - expected: []digest.Digest{"sha1:abc", "sha256:def"}, - }, - { - description: "simple reversed order", - input: [][]digest.Digest{ - {"sha1:abc", "sha256:def"}, - {"sha256:def", "sha1:abc"}, - }, - expected: []digest.Digest{"sha1:abc", "sha256:def"}, - }, - { - description: "conflicting values for sha1", - input: [][]digest.Digest{ - {"sha1:abc", "sha256:def"}, - {"sha256:def", "sha1:def"}, - }, - err: fmt.Errorf("conflicting digests for sha1 found"), - }, - } { - fatalf := func(format string, args ...interface{}) { - t.Fatalf(testcase.description+": "+format, args...) - } - - var assembled []digest.Digest - - for _, ds := range testcase.input { - assembled = append(assembled, ds...) - } - - merged, err := uniqifyDigests(assembled...) 
- if err != testcase.err { - if testcase.err == nil { - fatalf("unexpected error uniqifying digests: %v", err) - } - - if err != testcase.err && err.Error() != testcase.err.Error() { - // compare by string till we create nice error type - fatalf("unexpected error uniqifying digests: %v != %v", err, testcase.err) - } - } - - if !reflect.DeepEqual(merged, testcase.expected) { - fatalf("unexpected uniquification: %v != %v", merged, testcase.expected) - } - - } -} diff --git a/vendor/github.com/containerd/continuity/groups_unix.go b/vendor/github.com/containerd/continuity/groups_unix.go deleted file mode 100644 index e15c14ff..00000000 --- a/vendor/github.com/containerd/continuity/groups_unix.go +++ /dev/null @@ -1,113 +0,0 @@ -package continuity - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// TODO(stevvooe): This needs a lot of work before we can call it useful. - -type groupIndex struct { - byName map[string]*group - byGID map[int]*group -} - -func getGroupIndex() (*groupIndex, error) { - f, err := os.Open("/etc/group") - if err != nil { - return nil, err - } - defer f.Close() - - groups, err := parseGroups(f) - if err != nil { - return nil, err - } - - return newGroupIndex(groups), nil -} - -func newGroupIndex(groups []group) *groupIndex { - gi := &groupIndex{ - byName: make(map[string]*group), - byGID: make(map[int]*group), - } - - for i, group := range groups { - gi.byGID[group.gid] = &groups[i] - gi.byName[group.name] = &groups[i] - } - - return gi -} - -type group struct { - name string - gid int - members []string -} - -func getGroupName(gid int) (string, error) { - f, err := os.Open("/etc/group") - if err != nil { - return "", err - } - defer f.Close() - - groups, err := parseGroups(f) - if err != nil { - return "", err - } - - for _, group := range groups { - if group.gid == gid { - return group.name, nil - } - } - - return "", fmt.Errorf("no group for gid") -} - -// parseGroups parses an /etc/group file for group names, ids and membership. -// This is unix specific. -func parseGroups(rd io.Reader) ([]group, error) { - var groups []group - scanner := bufio.NewScanner(rd) - - for scanner.Scan() { - if strings.HasPrefix(scanner.Text(), "#") { - continue // skip comment - } - - parts := strings.SplitN(scanner.Text(), ":", 4) - - if len(parts) != 4 { - return nil, fmt.Errorf("bad entry: %q", scanner.Text()) - } - - name, _, sgid, smembers := parts[0], parts[1], parts[2], parts[3] - - gid, err := strconv.Atoi(sgid) - if err != nil { - return nil, fmt.Errorf("bad gid: %q", gid) - } - - members := strings.Split(smembers, ",") - - groups = append(groups, group{ - name: name, - gid: gid, - members: members, - }) - } - - if scanner.Err() != nil { - return nil, scanner.Err() - } - - return groups, nil -} diff --git a/vendor/github.com/containerd/continuity/hardlinks.go b/vendor/github.com/containerd/continuity/hardlinks.go deleted file mode 100644 index 8b39bd06..00000000 --- a/vendor/github.com/containerd/continuity/hardlinks.go +++ /dev/null @@ -1,57 +0,0 @@ -package continuity - -import ( - "fmt" - "os" -) - -var ( - errNotAHardLink = fmt.Errorf("invalid hardlink") -) - -type hardlinkManager struct { - hardlinks map[hardlinkKey][]Resource -} - -func newHardlinkManager() *hardlinkManager { - return &hardlinkManager{ - hardlinks: map[hardlinkKey][]Resource{}, - } -} - -// Add attempts to add the resource to the hardlink manager. If the resource -// cannot be considered as a hardlink candidate, errNotAHardLink is returned. 
-func (hlm *hardlinkManager) Add(fi os.FileInfo, resource Resource) error { - if _, ok := resource.(Hardlinkable); !ok { - return errNotAHardLink - } - - key, err := newHardlinkKey(fi) - if err != nil { - return err - } - - hlm.hardlinks[key] = append(hlm.hardlinks[key], resource) - - return nil -} - -// Merge processes the current state of the hardlink manager and merges any -// shared nodes into hardlinked resources. -func (hlm *hardlinkManager) Merge() ([]Resource, error) { - var resources []Resource - for key, linked := range hlm.hardlinks { - if len(linked) < 1 { - return nil, fmt.Errorf("no hardlink entrys for dev, inode pair: %#v", key) - } - - merged, err := Merge(linked...) - if err != nil { - return nil, fmt.Errorf("error merging hardlink: %v", err) - } - - resources = append(resources, merged) - } - - return resources, nil -} diff --git a/vendor/github.com/containerd/continuity/hardlinks_unix.go b/vendor/github.com/containerd/continuity/hardlinks_unix.go deleted file mode 100644 index 1d81a3f9..00000000 --- a/vendor/github.com/containerd/continuity/hardlinks_unix.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build linux darwin freebsd solaris - -package continuity - -import ( - "fmt" - "os" - "syscall" -) - -// hardlinkKey provides a tuple-key for managing hardlinks. This is system- -// specific. -type hardlinkKey struct { - dev uint64 - inode uint64 -} - -// newHardlinkKey returns a hardlink key for the provided file info. If the -// resource does not represent a possible hardlink, errNotAHardLink will be -// returned. -func newHardlinkKey(fi os.FileInfo) (hardlinkKey, error) { - sys, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return hardlinkKey{}, fmt.Errorf("cannot resolve (*syscall.Stat_t) from os.FileInfo") - } - - if sys.Nlink < 2 { - // NOTE(stevvooe): This is not always true for all filesystems. We - // should somehow detect this and provided a slow "polyfill" that - // leverages os.SameFile if we detect a filesystem where link counts - // is not really supported. - return hardlinkKey{}, errNotAHardLink - } - - return hardlinkKey{dev: uint64(sys.Dev), inode: uint64(sys.Ino)}, nil -} diff --git a/vendor/github.com/containerd/continuity/hardlinks_windows.go b/vendor/github.com/containerd/continuity/hardlinks_windows.go deleted file mode 100644 index be516c56..00000000 --- a/vendor/github.com/containerd/continuity/hardlinks_windows.go +++ /dev/null @@ -1,12 +0,0 @@ -package continuity - -import "os" - -type hardlinkKey struct{} - -func newHardlinkKey(fi os.FileInfo) (hardlinkKey, error) { - // NOTE(stevvooe): Obviously, this is not yet implemented. However, the - // makings of an implementation are available in src/os/types_windows.go. More - // investigation needs to be done to figure out exactly how to do this. - return hardlinkKey{}, errNotAHardLink -} diff --git a/vendor/github.com/containerd/continuity/ioutils.go b/vendor/github.com/containerd/continuity/ioutils.go deleted file mode 100644 index 0e2cc5fb..00000000 --- a/vendor/github.com/containerd/continuity/ioutils.go +++ /dev/null @@ -1,39 +0,0 @@ -package continuity - -import ( - "io" - "io/ioutil" - "os" - "path/filepath" -) - -// atomicWriteFile writes data to a file by first writing to a temp -// file and calling rename. 
-func atomicWriteFile(filename string, r io.Reader, rf RegularFile) error { - f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) - if err != nil { - return err - } - err = os.Chmod(f.Name(), rf.Mode()) - if err != nil { - f.Close() - return err - } - n, err := io.Copy(f, r) - if err == nil && n < rf.Size() { - f.Close() - return io.ErrShortWrite - } - if err != nil { - f.Close() - return err - } - if err := f.Sync(); err != nil { - f.Close() - return err - } - if err := f.Close(); err != nil { - return err - } - return os.Rename(f.Name(), filename) -} diff --git a/vendor/github.com/containerd/continuity/manifest.go b/vendor/github.com/containerd/continuity/manifest.go deleted file mode 100644 index f704f048..00000000 --- a/vendor/github.com/containerd/continuity/manifest.go +++ /dev/null @@ -1,144 +0,0 @@ -package continuity - -import ( - "fmt" - "io" - "log" - "os" - "sort" - - pb "github.com/containerd/continuity/proto" - "github.com/golang/protobuf/proto" -) - -// Manifest provides the contents of a manifest. Users of this struct should -// not typically modify any fields directly. -type Manifest struct { - // Resources specifies all the resources for a manifest in order by path. - Resources []Resource -} - -func Unmarshal(p []byte) (*Manifest, error) { - var bm pb.Manifest - - if err := proto.Unmarshal(p, &bm); err != nil { - return nil, err - } - - var m Manifest - for _, b := range bm.Resource { - r, err := fromProto(b) - if err != nil { - return nil, err - } - - m.Resources = append(m.Resources, r) - } - - return &m, nil -} - -func Marshal(m *Manifest) ([]byte, error) { - var bm pb.Manifest - for _, resource := range m.Resources { - bm.Resource = append(bm.Resource, toProto(resource)) - } - - return proto.Marshal(&bm) -} - -func MarshalText(w io.Writer, m *Manifest) error { - var bm pb.Manifest - for _, resource := range m.Resources { - bm.Resource = append(bm.Resource, toProto(resource)) - } - - return proto.MarshalText(w, &bm) -} - -// BuildManifest creates the manifest for the given context -func BuildManifest(ctx Context) (*Manifest, error) { - resourcesByPath := map[string]Resource{} - hardlinks := newHardlinkManager() - - if err := ctx.Walk(func(p string, fi os.FileInfo, err error) error { - if err != nil { - return fmt.Errorf("error walking %s: %v", p, err) - } - - if p == string(os.PathSeparator) { - // skip root - return nil - } - - resource, err := ctx.Resource(p, fi) - if err != nil { - if err == ErrNotFound { - return nil - } - log.Printf("error getting resource %q: %v", p, err) - return err - } - - // add to the hardlink manager - if err := hardlinks.Add(fi, resource); err == nil { - // Resource has been accepted by hardlink manager so we don't add - // it to the resourcesByPath until we merge at the end. - return nil - } else if err != errNotAHardLink { - // handle any other case where we have a proper error. - return fmt.Errorf("adding hardlink %s: %v", p, err) - } - - resourcesByPath[p] = resource - - return nil - }); err != nil { - return nil, err - } - - // merge and post-process the hardlinks. 
- hardlinked, err := hardlinks.Merge() - if err != nil { - return nil, err - } - - for _, resource := range hardlinked { - resourcesByPath[resource.Path()] = resource - } - - var resources []Resource - for _, resource := range resourcesByPath { - resources = append(resources, resource) - } - - sort.Stable(ByPath(resources)) - - return &Manifest{ - Resources: resources, - }, nil -} - -// VerifyManifest verifies all the resources in a manifest -// against files from the given context. -func VerifyManifest(ctx Context, manifest *Manifest) error { - for _, resource := range manifest.Resources { - if err := ctx.Verify(resource); err != nil { - return err - } - } - - return nil -} - -// ApplyManifest applies on the resources in a manifest to -// the given context. -func ApplyManifest(ctx Context, manifest *Manifest) error { - for _, resource := range manifest.Resources { - if err := ctx.Apply(resource); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/containerd/continuity/manifest_test.go b/vendor/github.com/containerd/continuity/manifest_test.go deleted file mode 100644 index 17259310..00000000 --- a/vendor/github.com/containerd/continuity/manifest_test.go +++ /dev/null @@ -1,420 +0,0 @@ -// +build !windows - -package continuity - -import ( - "bytes" - _ "crypto/sha256" - "errors" - "fmt" - "io/ioutil" - "math/rand" - "os" - "path/filepath" - "sort" - "syscall" - "testing" - - "github.com/containerd/continuity/devices" - "github.com/opencontainers/go-digest" -) - -// Hard things: -// 1. Groups/gid - no standard library support. -// 2. xattrs - must choose package to provide this. -// 3. ADS - no clue where to start. - -func TestWalkFS(t *testing.T) { - rand.Seed(1) - - // Testing: - // 1. Setup different files: - // - links - // - sibling directory - relative - // - sibling directory - absolute - // - parent directory - absolute - // - parent directory - relative - // - illegal links - // - parent directory - relative, out of root - // - parent directory - absolute, out of root - // - regular files - // - character devices - // - what about sticky bits? - // 2. Build the manifest. - // 3. Verify expected result. - testResources := []dresource{ - { - path: "a", - mode: 0644, - }, - { - kind: rhardlink, - path: "a-hardlink", - target: "a", - }, - { - kind: rdirectory, - path: "b", - mode: 0755, - }, - { - kind: rhardlink, - path: "b/a-hardlink", - target: "a", - }, - { - path: "b/a", - mode: 0600 | os.ModeSticky, - }, - { - kind: rdirectory, - path: "c", - mode: 0755, - }, - { - path: "c/a", - mode: 0644, - }, - { - kind: rrelsymlink, - path: "c/ca-relsymlink", - mode: 0600, - target: "a", - }, - { - kind: rrelsymlink, - path: "c/a-relsymlink", - mode: 0600, - target: "../a", - }, - { - kind: rabssymlink, - path: "c/a-abssymlink", - mode: 0600, - target: "a", - }, - // TODO(stevvooe): Make sure we can test this case and get proper - // errors when it is encountered. - // { - // // create a bad symlink and make sure we don't include it. - // kind: relsymlink, - // path: "c/a-badsymlink", - // mode: 0600, - // target: "../../..", - // }, - - // TODO(stevvooe): Must add tests for xattrs, with symlinks, - // directorys and regular files. - - { - kind: rnamedpipe, - path: "fifo", - mode: 0666 | os.ModeNamedPipe, - }, - - { - kind: rdirectory, - path: "/dev", - mode: 0755, - }, - - // NOTE(stevvooe): Below here, we add a few simple character devices. - // Block devices are untested but should be nearly the same as - // character devices. 
- // devNullResource, - // devZeroResource, - } - - root, err := ioutil.TempDir("", "continuity-test-") - if err != nil { - t.Fatalf("error creating temporary directory: %v", err) - } - - defer os.RemoveAll(root) - - generateTestFiles(t, root, testResources) - - ctx, err := NewContext(root) - if err != nil { - t.Fatalf("error getting context: %v", err) - } - - m, err := BuildManifest(ctx) - if err != nil { - t.Fatalf("error building manifest: %v", err) - } - - var b bytes.Buffer - MarshalText(&b, m) - t.Log(b.String()) - - // TODO(dmcgowan): always verify, currently hard links not supported - //if err := VerifyManifest(ctx, m); err != nil { - // t.Fatalf("error verifying manifest: %v") - //} - - expectedResources, err := expectedResourceList(root, testResources) - if err != nil { - // TODO(dmcgowan): update function to panic, this would mean test setup error - t.Fatalf("error creating resource list: %v", err) - } - - // Diff resources - diff := diffResourceList(expectedResources, m.Resources) - if diff.HasDiff() { - t.Log("Resource list difference") - for _, a := range diff.Additions { - t.Logf("Unexpected resource: %#v", a) - } - for _, d := range diff.Deletions { - t.Logf("Missing resource: %#v", d) - } - for _, u := range diff.Updates { - t.Logf("Changed resource:\n\tExpected: %#v\n\tActual: %#v", u.Original, u.Updated) - } - - t.FailNow() - } -} - -// TODO(stevvooe): At this time, we have a nice testing framework to define -// and build resources. This will likely be a pre-cursor to the packages -// public interface. -type kind int - -func (k kind) String() string { - switch k { - case rfile: - return "file" - case rdirectory: - return "directory" - case rhardlink: - return "hardlink" - case rchardev: - return "chardev" - case rnamedpipe: - return "namedpipe" - } - - panic(fmt.Sprintf("unknown kind: %v", int(k))) -} - -const ( - rfile kind = iota - rdirectory - rhardlink - rrelsymlink - rabssymlink - rchardev - rnamedpipe -) - -type dresource struct { - kind kind - path string - mode os.FileMode - target string // hard/soft link target - digest digest.Digest - size int - uid int64 - gid int64 - major, minor int -} - -func generateTestFiles(t *testing.T, root string, resources []dresource) { - for i, resource := range resources { - p := filepath.Join(root, resource.path) - switch resource.kind { - case rfile: - size := rand.Intn(4 << 20) - d := make([]byte, size) - randomBytes(d) - dgst := digest.FromBytes(d) - resources[i].digest = dgst - resources[i].size = size - - // this relies on the proper directory parent being defined. - if err := ioutil.WriteFile(p, d, resource.mode); err != nil { - t.Fatalf("error writing %q: %v", p, err) - } - case rdirectory: - if err := os.Mkdir(p, resource.mode); err != nil { - t.Fatalf("error creating directory %q: %v", p, err) - } - case rhardlink: - target := filepath.Join(root, resource.target) - if err := os.Link(target, p); err != nil { - t.Fatalf("error creating hardlink: %v", err) - } - case rrelsymlink: - if err := os.Symlink(resource.target, p); err != nil { - t.Fatalf("error creating symlink: %v", err) - } - case rabssymlink: - // for absolute links, we join with root. 
- target := filepath.Join(root, resource.target) - - if err := os.Symlink(target, p); err != nil { - t.Fatalf("error creating symlink: %v", err) - } - case rchardev, rnamedpipe: - if err := devices.Mknod(p, resource.mode, resource.major, resource.minor); err != nil { - t.Fatalf("error creating device %q: %v", p, err) - } - default: - t.Fatalf("unknown resource type: %v", resource.kind) - } - - st, err := os.Lstat(p) - if err != nil { - t.Fatalf("error statting after creation: %v", err) - } - resources[i].uid = int64(st.Sys().(*syscall.Stat_t).Uid) - resources[i].gid = int64(st.Sys().(*syscall.Stat_t).Gid) - resources[i].mode = st.Mode() - - // TODO: Readback and join xattr - } - - // log the test root for future debugging - if err := filepath.Walk(root, func(p string, fi os.FileInfo, err error) error { - if fi.Mode()&os.ModeSymlink != 0 { - target, err := os.Readlink(p) - if err != nil { - return err - } - t.Log(fi.Mode(), p, "->", target) - } else { - t.Log(fi.Mode(), p) - } - - return nil - }); err != nil { - t.Fatalf("error walking created root: %v", err) - } - - var b bytes.Buffer - if err := tree(&b, root); err != nil { - t.Fatalf("error running tree: %v", err) - } - t.Logf("\n%s", b.String()) -} - -func randomBytes(p []byte) { - for i := range p { - p[i] = byte(rand.Intn(1<<8 - 1)) - } -} - -// expectedResourceList sorts the set of resources into the order -// expected in the manifest and collapses hardlinks -func expectedResourceList(root string, resources []dresource) ([]Resource, error) { - resourceMap := map[string]Resource{} - paths := []string{} - for _, r := range resources { - absPath := r.path - if !filepath.IsAbs(absPath) { - absPath = "/" + absPath - } - switch r.kind { - case rfile: - f := ®ularFile{ - resource: resource{ - paths: []string{absPath}, - mode: r.mode, - uid: r.uid, - gid: r.gid, - }, - size: int64(r.size), - digests: []digest.Digest{r.digest}, - } - resourceMap[absPath] = f - paths = append(paths, absPath) - case rdirectory: - d := &directory{ - resource: resource{ - paths: []string{absPath}, - mode: r.mode, - uid: r.uid, - gid: r.gid, - }, - } - resourceMap[absPath] = d - paths = append(paths, absPath) - case rhardlink: - targetPath := r.target - if !filepath.IsAbs(targetPath) { - targetPath = "/" + targetPath - } - target, ok := resourceMap[targetPath] - if !ok { - return nil, errors.New("must specify target before hardlink for test resources") - } - rf, ok := target.(*regularFile) - if !ok { - return nil, errors.New("hardlink target must be regular file") - } - // TODO(dmcgowan): full merge - rf.paths = append(rf.paths, absPath) - // TODO(dmcgowan): check if first path is now different, changes source order and should update - // resource map key, to avoid canonically ordered first should be regular file - sort.Stable(sort.StringSlice(rf.paths)) - case rrelsymlink, rabssymlink: - targetPath := r.target - if r.kind == rabssymlink && !filepath.IsAbs(r.target) { - // for absolute links, we join with root. 
- targetPath = filepath.Join(root, targetPath) - } - s := &symLink{ - resource: resource{ - paths: []string{absPath}, - mode: r.mode, - uid: r.uid, - gid: r.gid, - }, - target: targetPath, - } - resourceMap[absPath] = s - paths = append(paths, absPath) - case rchardev: - d := &device{ - resource: resource{ - paths: []string{absPath}, - mode: r.mode, - uid: r.uid, - gid: r.gid, - }, - major: uint64(r.major), - minor: uint64(r.minor), - } - resourceMap[absPath] = d - paths = append(paths, absPath) - case rnamedpipe: - p := &namedPipe{ - resource: resource{ - paths: []string{absPath}, - mode: r.mode, - uid: r.uid, - gid: r.gid, - }, - } - resourceMap[absPath] = p - paths = append(paths, absPath) - default: - return nil, fmt.Errorf("unknown resource type: %v", r.kind) - } - } - - if len(resourceMap) < len(paths) { - return nil, errors.New("resource list has duplicated paths") - } - - sort.Strings(paths) - - manifestResources := make([]Resource, len(paths)) - for i, p := range paths { - manifestResources[i] = resourceMap[p] - } - - return manifestResources, nil -} diff --git a/vendor/github.com/containerd/continuity/manifest_test_darwin.go b/vendor/github.com/containerd/continuity/manifest_test_darwin.go deleted file mode 100644 index 873ec318..00000000 --- a/vendor/github.com/containerd/continuity/manifest_test_darwin.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build ignore - -package continuity - -import "os" - -var ( - devNullResource = resource{ - kind: chardev, - path: "/dev/null", - major: 3, - minor: 2, - mode: 0666 | os.ModeDevice | os.ModeCharDevice, - } - - devZeroResource = resource{ - kind: chardev, - path: "/dev/zero", - major: 3, - minor: 3, - mode: 0666 | os.ModeDevice | os.ModeCharDevice, - } -) diff --git a/vendor/github.com/containerd/continuity/pathdriver/BUILD.bazel b/vendor/github.com/containerd/continuity/pathdriver/BUILD.bazel deleted file mode 100644 index 60cd8431..00000000 --- a/vendor/github.com/containerd/continuity/pathdriver/BUILD.bazel +++ /dev/null @@ -1,8 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["path_driver.go"], - importpath = "github.com/containerd/continuity/pathdriver", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/containerd/continuity/pathdriver/path_driver.go b/vendor/github.com/containerd/continuity/pathdriver/path_driver.go index b43d55fe..b0d5a6b5 100644 --- a/vendor/github.com/containerd/continuity/pathdriver/path_driver.go +++ b/vendor/github.com/containerd/continuity/pathdriver/path_driver.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + package pathdriver import ( diff --git a/vendor/github.com/containerd/continuity/resource.go b/vendor/github.com/containerd/continuity/resource.go deleted file mode 100644 index 3643effb..00000000 --- a/vendor/github.com/containerd/continuity/resource.go +++ /dev/null @@ -1,574 +0,0 @@ -package continuity - -import ( - "errors" - "fmt" - "os" - "reflect" - "sort" - - pb "github.com/containerd/continuity/proto" - "github.com/opencontainers/go-digest" -) - -// TODO(stevvooe): A record based model, somewhat sketched out at the bottom -// of this file, will be more flexible. Another possibly is to tie the package -// interface directly to the protobuf type. This will have efficiency -// advantages at the cost coupling the nasty codegen types to the exported -// interface. - -type Resource interface { - // Path provides the primary resource path relative to the bundle root. In - // cases where resources have more than one path, such as with hard links, - // this will return the primary path, which is often just the first entry. - Path() string - - // Mode returns the - Mode() os.FileMode - - UID() int64 - GID() int64 -} - -// ByPath provides the canonical sort order for a set of resources. Use with -// sort.Stable for deterministic sorting. -type ByPath []Resource - -func (bp ByPath) Len() int { return len(bp) } -func (bp ByPath) Swap(i, j int) { bp[i], bp[j] = bp[j], bp[i] } -func (bp ByPath) Less(i, j int) bool { return bp[i].Path() < bp[j].Path() } - -type XAttrer interface { - XAttrs() map[string][]byte -} - -// Hardlinkable is an interface that a resource type satisfies if it can be a -// hardlink target. -type Hardlinkable interface { - // Paths returns all paths of the resource, including the primary path - // returned by Resource.Path. If len(Paths()) > 1, the resource is a hard - // link. - Paths() []string -} - -type RegularFile interface { - Resource - XAttrer - Hardlinkable - - Size() int64 - Digests() []digest.Digest -} - -// Merge two or more Resources into new file. Typically, this should be -// used to merge regular files as hardlinks. If the files are not identical, -// other than Paths and Digests, the merge will fail and an error will be -// returned. -func Merge(fs ...Resource) (Resource, error) { - if len(fs) < 1 { - return nil, fmt.Errorf("please provide a resource to merge") - } - - if len(fs) == 1 { - return fs[0], nil - } - - var paths []string - var digests []digest.Digest - bypath := map[string][]Resource{} - - // The attributes are all compared against the first to make sure they - // agree before adding to the above collections. If any of these don't - // correctly validate, the merge fails. - prototype := fs[0] - xattrs := make(map[string][]byte) - - // initialize xattrs for use below. All files must have same xattrs. 
- if prototypeXAttrer, ok := prototype.(XAttrer); ok { - for attr, value := range prototypeXAttrer.XAttrs() { - xattrs[attr] = value - } - } - - for _, f := range fs { - h, isHardlinkable := f.(Hardlinkable) - if !isHardlinkable { - return nil, errNotAHardLink - } - - if f.Mode() != prototype.Mode() { - return nil, fmt.Errorf("modes do not match: %v != %v", f.Mode(), prototype.Mode()) - } - - if f.UID() != prototype.UID() { - return nil, fmt.Errorf("uid does not match: %v != %v", f.UID(), prototype.UID()) - } - - if f.GID() != prototype.GID() { - return nil, fmt.Errorf("gid does not match: %v != %v", f.GID(), prototype.GID()) - } - - if xattrer, ok := f.(XAttrer); ok { - fxattrs := xattrer.XAttrs() - if !reflect.DeepEqual(fxattrs, xattrs) { - return nil, fmt.Errorf("resource %q xattrs do not match: %v != %v", f, fxattrs, xattrs) - } - } - - for _, p := range h.Paths() { - pfs, ok := bypath[p] - if !ok { - // ensure paths are unique by only appending on a new path. - paths = append(paths, p) - } - - bypath[p] = append(pfs, f) - } - - if regFile, isRegFile := f.(RegularFile); isRegFile { - prototypeRegFile, prototypeIsRegFile := prototype.(RegularFile) - if !prototypeIsRegFile { - return nil, errors.New("prototype is not a regular file") - } - - if regFile.Size() != prototypeRegFile.Size() { - return nil, fmt.Errorf("size does not match: %v != %v", regFile.Size(), prototypeRegFile.Size()) - } - - digests = append(digests, regFile.Digests()...) - } else if device, isDevice := f.(Device); isDevice { - prototypeDevice, prototypeIsDevice := prototype.(Device) - if !prototypeIsDevice { - return nil, errors.New("prototype is not a device") - } - - if device.Major() != prototypeDevice.Major() { - return nil, fmt.Errorf("major number does not match: %v != %v", device.Major(), prototypeDevice.Major()) - } - if device.Minor() != prototypeDevice.Minor() { - return nil, fmt.Errorf("minor number does not match: %v != %v", device.Minor(), prototypeDevice.Minor()) - } - } else if _, isNamedPipe := f.(NamedPipe); isNamedPipe { - _, prototypeIsNamedPipe := prototype.(NamedPipe) - if !prototypeIsNamedPipe { - return nil, errors.New("prototype is not a named pipe") - } - } else { - return nil, errNotAHardLink - } - } - - sort.Stable(sort.StringSlice(paths)) - - // Choose a "canonical" file. Really, it is just the first file to sort - // against. We also effectively select the very first digest as the - // "canonical" one for this file. - first := bypath[paths[0]][0] - - resource := resource{ - paths: paths, - mode: first.Mode(), - uid: first.UID(), - gid: first.GID(), - xattrs: xattrs, - } - - switch typedF := first.(type) { - case RegularFile: - var err error - digests, err = uniqifyDigests(digests...) - if err != nil { - return nil, err - } - - return ®ularFile{ - resource: resource, - size: typedF.Size(), - digests: digests, - }, nil - case Device: - return &device{ - resource: resource, - major: typedF.Major(), - minor: typedF.Minor(), - }, nil - - case NamedPipe: - return &namedPipe{ - resource: resource, - }, nil - - default: - return nil, errNotAHardLink - } -} - -type Directory interface { - Resource - XAttrer - - // Directory is a no-op method to identify directory objects by interface. - Directory() -} - -type SymLink interface { - Resource - - // Target returns the target of the symlink contained in the . - Target() string -} - -type NamedPipe interface { - Resource - Hardlinkable - XAttrer - - // Pipe is a no-op method to allow consistent resolution of NamedPipe - // interface. 
- Pipe() -} - -type Device interface { - Resource - Hardlinkable - XAttrer - - Major() uint64 - Minor() uint64 -} - -type resource struct { - paths []string - mode os.FileMode - uid, gid int64 - xattrs map[string][]byte -} - -var _ Resource = &resource{} - -func (r *resource) Path() string { - if len(r.paths) < 1 { - return "" - } - - return r.paths[0] -} - -func (r *resource) Mode() os.FileMode { - return r.mode -} - -func (r *resource) UID() int64 { - return r.uid -} - -func (r *resource) GID() int64 { - return r.gid -} - -type regularFile struct { - resource - size int64 - digests []digest.Digest -} - -var _ RegularFile = ®ularFile{} - -// newRegularFile returns the RegularFile, using the populated base resource -// and one or more digests of the content. -func newRegularFile(base resource, paths []string, size int64, dgsts ...digest.Digest) (RegularFile, error) { - if !base.Mode().IsRegular() { - return nil, fmt.Errorf("not a regular file") - } - - base.paths = make([]string, len(paths)) - copy(base.paths, paths) - - // make our own copy of digests - ds := make([]digest.Digest, len(dgsts)) - copy(ds, dgsts) - - return ®ularFile{ - resource: base, - size: size, - digests: ds, - }, nil -} - -func (rf *regularFile) Paths() []string { - paths := make([]string, len(rf.paths)) - copy(paths, rf.paths) - return paths -} - -func (rf *regularFile) Size() int64 { - return rf.size -} - -func (rf *regularFile) Digests() []digest.Digest { - digests := make([]digest.Digest, len(rf.digests)) - copy(digests, rf.digests) - return digests -} - -func (rf *regularFile) XAttrs() map[string][]byte { - xattrs := make(map[string][]byte, len(rf.xattrs)) - - for attr, value := range rf.xattrs { - xattrs[attr] = append(xattrs[attr], value...) - } - - return xattrs -} - -type directory struct { - resource -} - -var _ Directory = &directory{} - -func newDirectory(base resource) (Directory, error) { - if !base.Mode().IsDir() { - return nil, fmt.Errorf("not a directory") - } - - return &directory{ - resource: base, - }, nil -} - -func (d *directory) Directory() {} - -func (d *directory) XAttrs() map[string][]byte { - xattrs := make(map[string][]byte, len(d.xattrs)) - - for attr, value := range d.xattrs { - xattrs[attr] = append(xattrs[attr], value...) - } - - return xattrs -} - -type symLink struct { - resource - target string -} - -var _ SymLink = &symLink{} - -func newSymLink(base resource, target string) (SymLink, error) { - if base.Mode()&os.ModeSymlink == 0 { - return nil, fmt.Errorf("not a symlink") - } - - return &symLink{ - resource: base, - target: target, - }, nil -} - -func (l *symLink) Target() string { - return l.target -} - -type namedPipe struct { - resource -} - -var _ NamedPipe = &namedPipe{} - -func newNamedPipe(base resource, paths []string) (NamedPipe, error) { - if base.Mode()&os.ModeNamedPipe == 0 { - return nil, fmt.Errorf("not a namedpipe") - } - - base.paths = make([]string, len(paths)) - copy(base.paths, paths) - - return &namedPipe{ - resource: base, - }, nil -} - -func (np *namedPipe) Pipe() {} - -func (np *namedPipe) Paths() []string { - paths := make([]string, len(np.paths)) - copy(paths, np.paths) - return paths -} - -func (np *namedPipe) XAttrs() map[string][]byte { - xattrs := make(map[string][]byte, len(np.xattrs)) - - for attr, value := range np.xattrs { - xattrs[attr] = append(xattrs[attr], value...) 
- } - - return xattrs -} - -type device struct { - resource - major, minor uint64 -} - -var _ Device = &device{} - -func newDevice(base resource, paths []string, major, minor uint64) (Device, error) { - if base.Mode()&os.ModeDevice == 0 { - return nil, fmt.Errorf("not a device") - } - - base.paths = make([]string, len(paths)) - copy(base.paths, paths) - - return &device{ - resource: base, - major: major, - minor: minor, - }, nil -} - -func (d *device) Paths() []string { - paths := make([]string, len(d.paths)) - copy(paths, d.paths) - return paths -} - -func (d *device) XAttrs() map[string][]byte { - xattrs := make(map[string][]byte, len(d.xattrs)) - - for attr, value := range d.xattrs { - xattrs[attr] = append(xattrs[attr], value...) - } - - return xattrs -} - -func (d device) Major() uint64 { - return d.major -} - -func (d device) Minor() uint64 { - return d.minor -} - -// toProto converts a resource to a protobuf record. We'd like to push this -// the individual types but we want to keep this all together during -// prototyping. -func toProto(resource Resource) *pb.Resource { - b := &pb.Resource{ - Path: []string{resource.Path()}, - Mode: uint32(resource.Mode()), - Uid: resource.UID(), - Gid: resource.GID(), - } - - if xattrer, ok := resource.(XAttrer); ok { - // Sorts the XAttrs by name for consistent ordering. - keys := []string{} - xattrs := xattrer.XAttrs() - for k := range xattrs { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - b.Xattr = append(b.Xattr, &pb.XAttr{Name: k, Data: xattrs[k]}) - } - } - - switch r := resource.(type) { - case RegularFile: - b.Path = r.Paths() - b.Size = uint64(r.Size()) - - for _, dgst := range r.Digests() { - b.Digest = append(b.Digest, dgst.String()) - } - case SymLink: - b.Target = r.Target() - case Device: - b.Major, b.Minor = r.Major(), r.Minor() - b.Path = r.Paths() - case NamedPipe: - b.Path = r.Paths() - } - - // enforce a few stability guarantees that may not be provided by the - // resource implementation. - sort.Strings(b.Path) - - return b -} - -// fromProto converts from a protobuf Resource to a Resource interface. -func fromProto(b *pb.Resource) (Resource, error) { - base := &resource{ - paths: b.Path, - mode: os.FileMode(b.Mode), - uid: b.Uid, - gid: b.Gid, - } - - base.xattrs = make(map[string][]byte, len(b.Xattr)) - - for _, attr := range b.Xattr { - base.xattrs[attr.Name] = attr.Data - } - - switch { - case base.Mode().IsRegular(): - dgsts := make([]digest.Digest, len(b.Digest)) - for i, dgst := range b.Digest { - // TODO(stevvooe): Should we be validating at this point? - dgsts[i] = digest.Digest(dgst) - } - - return newRegularFile(*base, b.Path, int64(b.Size), dgsts...) - case base.Mode().IsDir(): - return newDirectory(*base) - case base.Mode()&os.ModeSymlink != 0: - return newSymLink(*base, b.Target) - case base.Mode()&os.ModeNamedPipe != 0: - return newNamedPipe(*base, b.Path) - case base.Mode()&os.ModeDevice != 0: - return newDevice(*base, b.Path, b.Major, b.Minor) - } - - return nil, fmt.Errorf("unknown resource record (%#v): %s", b, base.Mode()) -} - -// NOTE(stevvooe): An alternative model that supports inline declaration. -// Convenient for unit testing where inline declarations may be desirable but -// creates an awkward API for the standard use case. 
- -// type ResourceKind int - -// const ( -// ResourceRegularFile = iota + 1 -// ResourceDirectory -// ResourceSymLink -// Resource -// ) - -// type Resource struct { -// Kind ResourceKind -// Paths []string -// Mode os.FileMode -// UID string -// GID string -// Size int64 -// Digests []digest.Digest -// Target string -// Major, Minor int -// XAttrs map[string][]byte -// } - -// type RegularFile struct { -// Paths []string -// Size int64 -// Digests []digest.Digest -// Perm os.FileMode // os.ModePerm + sticky, setuid, setgid -// } diff --git a/vendor/github.com/containerd/continuity/resource_test.go b/vendor/github.com/containerd/continuity/resource_test.go deleted file mode 100644 index 4f2cd95a..00000000 --- a/vendor/github.com/containerd/continuity/resource_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package continuity - -type resourceUpdate struct { - Original Resource - Updated Resource -} - -type resourceListDifference struct { - Additions []Resource - Deletions []Resource - Updates []resourceUpdate -} - -func (l resourceListDifference) HasDiff() bool { - return len(l.Additions) > 0 || len(l.Deletions) > 0 || len(l.Updates) > 0 -} - -// diffManifest compares two resource lists and returns the list -// of adds updates and deletes, resource lists are not reordered -// before doing difference. -func diffResourceList(r1, r2 []Resource) resourceListDifference { - i1 := 0 - i2 := 0 - var d resourceListDifference - - for i1 < len(r1) && i2 < len(r2) { - p1 := r1[i1].Path() - p2 := r2[i2].Path() - switch { - case p1 < p2: - d.Deletions = append(d.Deletions, r1[i1]) - i1++ - case p1 == p2: - if !compareResource(r1[i1], r2[i2]) { - d.Updates = append(d.Updates, resourceUpdate{ - Original: r1[i1], - Updated: r2[i2], - }) - } - i1++ - i2++ - case p1 > p2: - d.Additions = append(d.Additions, r2[i2]) - i2++ - } - } - - for i1 < len(r1) { - d.Deletions = append(d.Deletions, r1[i1]) - i1++ - - } - for i2 < len(r2) { - d.Additions = append(d.Additions, r2[i2]) - i2++ - } - - return d -} - -func compareResource(r1, r2 Resource) bool { - if r1.Path() != r2.Path() { - return false - } - if r1.Mode() != r2.Mode() { - return false - } - if r1.UID() != r2.UID() { - return false - } - if r1.GID() != r2.GID() { - return false - } - - // TODO(dmcgowan): Check if is XAttrer - - switch t1 := r1.(type) { - case RegularFile: - t2, ok := r2.(RegularFile) - if !ok { - return false - } - return compareRegularFile(t1, t2) - case Directory: - t2, ok := r2.(Directory) - if !ok { - return false - } - return compareDirectory(t1, t2) - case SymLink: - t2, ok := r2.(SymLink) - if !ok { - return false - } - return compareSymLink(t1, t2) - case NamedPipe: - t2, ok := r2.(NamedPipe) - if !ok { - return false - } - return compareNamedPipe(t1, t2) - case Device: - t2, ok := r2.(Device) - if !ok { - return false - } - return compareDevice(t1, t2) - default: - // TODO(dmcgowan): Should this panic? 
- return r1 == r2 - } -} - -func compareRegularFile(r1, r2 RegularFile) bool { - if r1.Size() != r2.Size() { - return false - } - p1 := r1.Paths() - p2 := r2.Paths() - if len(p1) != len(p2) { - return false - } - for i := range p1 { - if p1[i] != p2[i] { - return false - } - } - d1 := r1.Digests() - d2 := r2.Digests() - if len(d1) != len(d2) { - return false - } - for i := range d1 { - if d1[i] != d2[i] { - return false - } - } - - return true -} - -func compareSymLink(r1, r2 SymLink) bool { - return r1.Target() == r2.Target() -} - -func compareDirectory(r1, r2 Directory) bool { - return true -} - -func compareNamedPipe(r1, r2 NamedPipe) bool { - return true -} - -func compareDevice(r1, r2 Device) bool { - return r1.Major() == r2.Major() && r1.Minor() == r2.Minor() -} diff --git a/vendor/github.com/containerd/continuity/resource_unix.go b/vendor/github.com/containerd/continuity/resource_unix.go deleted file mode 100644 index 4144643e..00000000 --- a/vendor/github.com/containerd/continuity/resource_unix.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build linux darwin freebsd solaris - -package continuity - -import ( - "fmt" - "os" - "syscall" -) - -// newBaseResource returns a *resource, populated with data from p and fi, -// where p will be populated directly. -func newBaseResource(p string, fi os.FileInfo) (*resource, error) { - // TODO(stevvooe): This need to be resolved for the container's root, - // where here we are really getting the host OS's value. We need to allow - // this be passed in and fixed up to make these uid/gid mappings portable. - // Either this can be part of the driver or we can achieve it through some - // other mechanism. - sys, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - // TODO(stevvooe): This may not be a hard error for all platforms. We - // may want to move this to the driver. - return nil, fmt.Errorf("unable to resolve syscall.Stat_t from (os.FileInfo).Sys(): %#v", fi) - } - - return &resource{ - paths: []string{p}, - mode: fi.Mode(), - - uid: int64(sys.Uid), - gid: int64(sys.Gid), - - // NOTE(stevvooe): Population of shared xattrs field is deferred to - // the resource types that populate it. Since they are a property of - // the context, they must set there. - }, nil -} diff --git a/vendor/github.com/containerd/continuity/resource_windows.go b/vendor/github.com/containerd/continuity/resource_windows.go deleted file mode 100644 index 7b44414a..00000000 --- a/vendor/github.com/containerd/continuity/resource_windows.go +++ /dev/null @@ -1,12 +0,0 @@ -package continuity - -import "os" - -// newBaseResource returns a *resource, populated with data from p and fi, -// where p will be populated directly. 
-func newBaseResource(p string, fi os.FileInfo) (*resource, error) { - return &resource{ - paths: []string{p}, - mode: fi.Mode(), - }, nil -} diff --git a/vendor/github.com/containerd/continuity/testutil_test.go b/vendor/github.com/containerd/continuity/testutil_test.go deleted file mode 100644 index 72aeb05b..00000000 --- a/vendor/github.com/containerd/continuity/testutil_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package continuity - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" -) - -func tree(w io.Writer, dir string) error { - fmt.Fprintf(w, "%s\n", dir) - return _tree(w, dir, "") -} - -func _tree(w io.Writer, dir string, indent string) error { - files, err := ioutil.ReadDir(dir) - if err != nil { - return err - } - for i, f := range files { - fPath := filepath.Join(dir, f.Name()) - fIndent := indent - if i < len(files)-1 { - fIndent += "|-- " - } else { - fIndent += "`-- " - } - target := "" - if f.Mode()&os.ModeSymlink == os.ModeSymlink { - realPath, err := os.Readlink(fPath) - if err != nil { - target += fmt.Sprintf(" -> unknown (error: %v)", err) - } else { - target += " -> " + realPath - } - } - fmt.Fprintf(w, "%s%s%s\n", - fIndent, f.Name(), target) - if f.IsDir() { - dIndent := indent - if i < len(files)-1 { - dIndent += "| " - } else { - dIndent += " " - } - if err := _tree(w, fPath, dIndent); err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/github.com/containerd/continuity/vendor.conf b/vendor/github.com/containerd/continuity/vendor.conf deleted file mode 100644 index 7c80deec..00000000 --- a/vendor/github.com/containerd/continuity/vendor.conf +++ /dev/null @@ -1,13 +0,0 @@ -bazil.org/fuse 371fbbdaa8987b715bdd21d6adc4c9b20155f748 -github.com/dustin/go-humanize bb3d318650d48840a39aa21a027c6630e198e626 -github.com/golang/protobuf 1e59b77b52bf8e4b449a57e6f79f21226d571845 -github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf -github.com/pkg/errors f15c970de5b76fac0b59abb32d62c17cc7bed265 -github.com/sirupsen/logrus 89742aefa4b206dcf400792f3bd35b542998eb3b -github.com/spf13/cobra 2da4a54c5ceefcee7ca5dd0eea1e18a3b6366489 -github.com/spf13/pflag 4c012f6dcd9546820e378d0bdda4d8fc772cdfea -golang.org/x/crypto 9f005a07e0d31d45e6656d241bb5c0f2efd4bc94 -golang.org/x/net a337091b0525af65de94df2eb7e98bd9962dcbe2 -golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c -golang.org/x/sys 665f6529cca930e27b831a0d1dafffbe1c172924 diff --git a/vendor/github.com/docker/distribution/.gitignore b/vendor/github.com/docker/distribution/.gitignore deleted file mode 100644 index 4cf7888e..00000000 --- a/vendor/github.com/docker/distribution/.gitignore +++ /dev/null @@ -1,38 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -# never checkin from the bin file (for now) -bin/* - -# Test key files -*.pem - -# Cover profiles -*.out - -# Editor/IDE specific files. 
-*.sublime-project -*.sublime-workspace -.idea/* diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap deleted file mode 100644 index d9910601..00000000 --- a/vendor/github.com/docker/distribution/.mailmap +++ /dev/null @@ -1,18 +0,0 @@ -Stephen J Day Stephen Day -Stephen J Day Stephen Day -Olivier Gambier Olivier Gambier -Brian Bland Brian Bland -Brian Bland Brian Bland -Josh Hawn Josh Hawn -Richard Scothern Richard -Richard Scothern Richard Scothern -Andrew Meredith Andrew Meredith -harche harche -Jessie Frazelle -Sharif Nassar Sharif Nassar -Sven Dowideit Sven Dowideit -Vincent Giersch Vincent Giersch -davidli davidli -Omer Cohen Omer Cohen -Eric Yang Eric Yang -Nikita Tarasov Nikita diff --git a/vendor/github.com/docker/distribution/BUILD.bazel b/vendor/github.com/docker/distribution/BUILD.bazel deleted file mode 100644 index bacf10b1..00000000 --- a/vendor/github.com/docker/distribution/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "blobs.go", - "doc.go", - "errors.go", - "manifests.go", - "registry.go", - "tags.go", - ], - importpath = "github.com/docker/distribution", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/docker/distribution/reference:go_default_library", - "//vendor/github.com/opencontainers/go-digest:go_default_library", - ], -) diff --git a/vendor/github.com/docker/distribution/BUILDING.md b/vendor/github.com/docker/distribution/BUILDING.md deleted file mode 100644 index 2981d016..00000000 --- a/vendor/github.com/docker/distribution/BUILDING.md +++ /dev/null @@ -1,117 +0,0 @@ - -# Building the registry source - -## Use-case - -This is useful if you intend to actively work on the registry. - -### Alternatives - -Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). - -People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. - -OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md). - -### Gotchas - -You are expected to know your way around with go & git. - -If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. - -## Build the development environment - -The first prerequisite of properly building distribution targets is to have a Go -development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) -for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the -environment. - -If a Go development environment is setup, one can use `go get` to install the -`registry` command from the current latest: - - go get github.com/docker/distribution/cmd/registry - -The above will install the source repository into the `GOPATH`. - -Now create the directory for the registry data (this might require you to set permissions properly) - - mkdir -p /var/lib/registry - -... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. 
- -The `registry` -binary can then be run with the following: - - $ $GOPATH/bin/registry --version - $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown - -> __NOTE:__ While you do not need to use `go get` to checkout the distribution -> project, for these build instructions to work, the project must be checked -> out in the correct location in the `GOPATH`. This should almost always be -> `$GOPATH/src/github.com/docker/distribution`. - -The registry can be run with the default config using the following -incantation: - - $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml - INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] debug server listening localhost:5001 - -If it is working, one should see the above log messages. - -### Repeatable Builds - -For the full development experience, one should `cd` into -`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` -commands, such as `go test`, should work per package (please see -[Developing](#developing) if they don't work). - -A `Makefile` has been provided as a convenience to support repeatable builds. -Please install the following into `GOPATH` for it to work: - - go get github.com/golang/lint/golint - -Once these commands are available in the `GOPATH`, run `make` to get a full -build: - - $ make - + clean - + fmt - + vet - + lint - + build - github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar - github.com/sirupsen/logrus - github.com/docker/libtrust - ... - github.com/yvasiyarov/gorelic - github.com/docker/distribution/registry/handlers - github.com/docker/distribution/cmd/registry - + test - ... - ok github.com/docker/distribution/digest 7.875s - ok github.com/docker/distribution/manifest 0.028s - ok github.com/docker/distribution/notifications 17.322s - ? github.com/docker/distribution/registry [no test files] - ok github.com/docker/distribution/registry/api/v2 0.101s - ? github.com/docker/distribution/registry/auth [no test files] - ok github.com/docker/distribution/registry/auth/silly 0.011s - ... - + /Users/sday/go/src/github.com/docker/distribution/bin/registry - + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template - + binaries - -The above provides a repeatable build using the contents of the vendor -directory. This includes formatting, vetting, linting, building, -testing and generating tagged binaries. We can verify this worked by running -the registry binary generated in the "./bin" directory: - - $ ./bin/registry --version - ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m - -### Optional build tags - -Optional [build tags](http://golang.org/pkg/go/build/) can be provided using -the environment variable `DOCKER_BUILDTAGS`. 
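The `atomicWriteFile` helper removed from `vendor/github.com/containerd/continuity/ioutils.go` earlier in this diff relies on the common write-to-temp-then-rename pattern. A minimal standalone sketch of that pattern using only the Go standard library (the `writeFileAtomic` name, the permissions argument, and the `example.txt` path are illustrative, not part of the vendored API):

```go
package main

import (
	"io"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"strings"
)

// writeFileAtomic writes r to filename so readers never observe a partially
// written file: data is written to a temp file in the same directory, flushed
// to disk, and then renamed over the target in a single step.
func writeFileAtomic(filename string, r io.Reader, perm os.FileMode) error {
	tmp, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // best-effort cleanup; harmless after a successful rename

	if err := tmp.Chmod(perm); err != nil {
		tmp.Close()
		return err
	}
	if _, err := io.Copy(tmp, r); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Sync(); err != nil { // flush contents before the rename for durability
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	// On POSIX filesystems the rename is atomic when source and target share a directory.
	return os.Rename(tmp.Name(), filename)
}

func main() {
	if err := writeFileAtomic("example.txt", strings.NewReader("hello\n"), 0644); err != nil {
		log.Fatal(err)
	}
}
```

Keeping the temp file in the target's own directory (rather than `os.TempDir`) is what makes the final rename safe, since a cross-filesystem rename would not be atomic.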
diff --git a/vendor/github.com/docker/distribution/CHANGELOG.md b/vendor/github.com/docker/distribution/CHANGELOG.md deleted file mode 100644 index e7b16b3c..00000000 --- a/vendor/github.com/docker/distribution/CHANGELOG.md +++ /dev/null @@ -1,108 +0,0 @@ -# Changelog - -## 2.6.0 (2017-01-18) - -#### Storage -- S3: fixed bug in delete due to read-after-write inconsistency -- S3: allow EC2 IAM roles to be used when authorizing region endpoints -- S3: add Object ACL Support -- S3: fix delete method's notion of subpaths -- S3: use multipart upload API in `Move` method for performance -- S3: add v2 signature signing for legacy S3 clones -- Swift: add simple heuristic to detect incomplete DLOs during read ops -- Swift: support different user and tenant domains -- Swift: bulk deletes in chunks -- Aliyun OSS: fix delete method's notion of subpaths -- Aliyun OSS: optimize data copy after upload finishes -- Azure: close leaking response body -- Fix storage drivers dropping non-EOF errors when listing repositories -- Compare path properly when listing repositories in catalog -- Add a foreign layer URL host whitelist -- Improve catalog enumerate runtime - -#### Registry -- Export `storage.CreateOptions` in top-level package -- Enable notifications to endpoints that use self-signed certificates -- Properly validate multi-URL foreign layers -- Add control over validation of URLs in pushed manifests -- Proxy mode: fix socket leak when pull is cancelled -- Tag service: properly handle error responses on HEAD request -- Support for custom authentication URL in proxying registry -- Add configuration option to disable access logging -- Add notification filtering by target media type -- Manifest: `References()` returns all children -- Honor `X-Forwarded-Port` and Forwarded headers -- Reference: Preserve tag and digest in With* functions -- Add policy configuration for enforcing repository classes - -#### Client -- Changes the client Tags `All()` method to follow links -- Allow registry clients to connect via HTTP2 -- Better handling of OAuth errors in client - -#### Spec -- Manifest: clarify relationship between urls and foreign layers -- Authorization: add support for repository classes - -#### Manifest -- Override media type returned from `Stat()` for existing manifests -- Add plugin mediatype to distribution manifest - -#### Docs -- Document `TOOMANYREQUESTS` error code -- Document required Let's Encrypt port -- Improve documentation around implementation of OAuth2 -- Improve documentation for configuration - -#### Auth -- Add support for registry type in scope -- Add support for using v2 ping challenges for v1 -- Add leeway to JWT `nbf` and `exp` checking -- htpasswd: dynamically parse htpasswd file -- Fix missing auth headers with PATCH HTTP request when pushing to default port - -#### Dockerfile -- Update to go1.7 -- Reorder Dockerfile steps for better layer caching - -#### Notes - -Documentation has moved to the documentation repository at -`github.com/docker/docker.github.io/tree/master/registry` - -The registry is go 1.7 compliant, and passes newer, more restrictive `lint` and `vet` ing. 
- - -## 2.5.0 (2016-06-14) - -#### Storage -- Ensure uploads directory is cleaned after upload is committed -- Add ability to cap concurrent operations in filesystem driver -- S3: Add 'us-gov-west-1' to the valid region list -- Swift: Handle ceph not returning Last-Modified header for HEAD requests -- Add redirect middleware - -#### Registry -- Add support for blobAccessController middleware -- Add support for layers from foreign sources -- Remove signature store -- Add support for Let's Encrypt -- Correct yaml key names in configuration - -#### Client -- Add option to get content digest from manifest get - -#### Spec -- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported -- Clarify API documentation around catalog fetch behavior - -#### API -- Support returning HTTP 429 (Too Many Requests) - -#### Documentation -- Update auth documentation examples to show "expires in" as int - -#### Docker Image -- Use Alpine Linux as base image - - diff --git a/vendor/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md deleted file mode 100644 index 4c067d9e..00000000 --- a/vendor/github.com/docker/distribution/CONTRIBUTING.md +++ /dev/null @@ -1,148 +0,0 @@ -# Contributing to the registry - -## Before reporting an issue... - -### If your problem is with... - - - automated builds - - your account on the [Docker Hub](https://hub.docker.com/) - - any other [Docker Hub](https://hub.docker.com/) issue - -Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) - -### If you... - - - need help setting up your registry - - can't figure out something - - are not sure what's going on or what your problem is - -Then please do not open an issue here yet - you should first try one of the following support forums: - - - irc: #docker-distribution on freenode - - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution - -### Reporting security issues - -The Docker maintainers take security seriously. If you discover a security -issue, please bring it to their attention right away! - -Please **DO NOT** file a public issue, instead send your report privately to -[security@docker.com](mailto:security@docker.com). - -## Reporting an issue properly - -By following these simple rules you will get better and faster feedback on your issue. - - - search the bugtracker for an already reported issue - -### If you found an issue that describes your problem: - - - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments - - please refrain from adding "same thing here" or "+1" comments - - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button - - comment if you have some new, technical and relevant information to add to the case - - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. - -### If you have not found an existing issue that describes your problem: - - 1. create a new issue, with a succinct title that describes your issue: - - bad title: "It doesn't work with my docker" - - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" - 2. copy the output of: - - `docker version` - - `docker info` - - `docker exec registry --version` - 3. 
copy the command line you used to launch your Registry - 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) - 5. reproduce your problem and get your docker daemon logs showing the error - 6. if relevant, copy your registry logs that show the error - 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) - 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry - -## Contributing a patch for a known bug, or a small correction - -You should follow the basic GitHub workflow: - - 1. fork - 2. commit a change - 3. make sure the tests pass - 4. PR - -Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple: - - - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` - - sign your commits using `-s`: `git commit -s -m "My commit"` - -Some simple rules to ensure quick merge: - - - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) - - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once - - if you need to amend your PR following comments, please squash instead of adding more commits - -## Contributing new features - -You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. - -If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. -If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. - -Then you should submit your implementation, clearly linking to the issue (and possible proposal). - -Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. - -It's mandatory to: - - - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) - - address maintainers' comments and modify your submission accordingly - - write tests for any new code - -Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. - -Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493) - -## Coding Style - -Unless explicitly stated, we follow all coding guidelines from the Go -community. While some of these standards may seem arbitrary, they somehow seem -to result in a solid, consistent codebase. - -It is possible that the code base does not currently comply with these -guidelines. We are not looking for a massive PR that fixes this, since that -goes against the spirit of the guidelines. All new contributions should make a -best effort to clean up and make the code base better than they left it. -Obviously, apply your best judgement. Remember, the goal here is to make the -code base easier for humans to navigate and understand. Always keep that in -mind when nudging others to comply. - -The rules: - -1. All code should be formatted with `gofmt -s`. -2. 
All code should pass the default levels of - [`golint`](https://github.com/golang/lint). -3. All code should follow the guidelines covered in [Effective - Go](http://golang.org/doc/effective_go.html) and [Go Code Review - Comments](https://github.com/golang/go/wiki/CodeReviewComments). -4. Comment the code. Tell us the why, the history and the context. -5. Document _all_ declarations and methods, even private ones. Declare - expectations, caveats and anything else that may be important. If a type - gets exported, having the comments already there will ensure it's ready. -6. Variable name length should be proportional to its context and no longer. - `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. - In practice, short methods will have short variable names and globals will - have longer names. -7. No underscores in package names. If you need a compound name, step back, - and re-examine why you need a compound name. If you still think you need a - compound name, lose the underscore. -8. No utils or helpers packages. If a function is not general enough to - warrant its own package, it has not been written generally enough to be a - part of a util package. Just leave it unexported and well-documented. -9. All tests should run with `go test` and outside tooling should not be - required. No, we don't need another unit testing framework. Assertion - packages are acceptable if they provide _real_ incremental value. -10. Even though we call these "rules" above, they are actually just - guidelines. Since you've read all the rules, you now know that. - -If you are having trouble getting into the mood of idiomatic Go, we recommend -reading through [Effective Go](http://golang.org/doc/effective_go.html). The -[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the -kool-aid is a lot easier than going thirsty. diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile deleted file mode 100644 index e0d74684..00000000 --- a/vendor/github.com/docker/distribution/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM golang:1.10-alpine - -ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution -ENV DOCKER_BUILDTAGS include_oss include_gcs - -ARG GOOS=linux -ARG GOARCH=amd64 - -RUN set -ex \ - && apk add --no-cache make git - -WORKDIR $DISTRIBUTION_DIR -COPY . $DISTRIBUTION_DIR -COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml - -RUN make PREFIX=/go clean binaries - -VOLUME ["/var/lib/registry"] -EXPOSE 5000 -ENTRYPOINT ["registry"] -CMD ["serve", "/etc/docker/registry/config.yml"] diff --git a/vendor/github.com/docker/distribution/MAINTAINERS b/vendor/github.com/docker/distribution/MAINTAINERS deleted file mode 100644 index bda40015..00000000 --- a/vendor/github.com/docker/distribution/MAINTAINERS +++ /dev/null @@ -1,58 +0,0 @@ -# Distribution maintainers file -# -# This file describes who runs the docker/distribution project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "aaronlehmann", - "dmcgowan", - "dmp42", - "richardscothern", - "shykes", - "stevvooe", - ] - -[people] - -# A reference list of all people associated with the project. 
-# All other sections should refer to people by their canonical key -# in the people section. - - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.aaronlehmann] - Name = "Aaron Lehmann" - Email = "aaron.lehmann@docker.com" - GitHub = "aaronlehmann" - - [people.dmcgowan] - Name = "Derek McGowan" - Email = "derek@mcgstyle.net" - GitHub = "dmcgowan" - - [people.dmp42] - Name = "Olivier Gambier" - Email = "olivier@docker.com" - GitHub = "dmp42" - - [people.richardscothern] - Name = "Richard Scothern" - Email = "richard.scothern@gmail.com" - GitHub = "richardscothern" - - [people.shykes] - Name = "Solomon Hykes" - Email = "solomon@docker.com" - GitHub = "shykes" - - [people.stevvooe] - Name = "Stephen Day" - Email = "stephen.day@docker.com" - GitHub = "stevvooe" diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile deleted file mode 100644 index 7c6f9c7a..00000000 --- a/vendor/github.com/docker/distribution/Makefile +++ /dev/null @@ -1,99 +0,0 @@ -# Set an output prefix, which is the local directory if not specified -PREFIX?=$(shell pwd) - - -# Used to populate version variable in main package. -VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) - -# Allow turning off function inlining and variable registerization -ifeq (${DISABLE_OPTIMIZATION},true) - GO_GCFLAGS=-gcflags "-N -l" - VERSION:="$(VERSION)-noopt" -endif - -GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" - -.PHONY: all build binaries clean dep-restore dep-save dep-validate fmt lint test test-full vet -.DEFAULT: all -all: fmt vet lint build test binaries - -AUTHORS: .mailmap .git/HEAD - git log --format='%aN <%aE>' | sort -fu > $@ - -# This only needs to be generated by hand when cutting full releases. -version/version.go: - ./version/version.sh > $@ - -# Required for go 1.5 to build -GO15VENDOREXPERIMENT := 1 - -# Go files -GOFILES=$(shell find . -type f -name '*.go') - -# Package list -PKGS=$(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/) - -# Resolving binary dependencies for specific targets -GOLINT=$(shell which golint || echo '') -VNDR=$(shell which vndr || echo '') - -${PREFIX}/bin/registry: $(GOFILES) - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry - -${PREFIX}/bin/digest: $(GOFILES) - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest - -${PREFIX}/bin/registry-api-descriptor-template: $(GOFILES) - @echo "+ $@" - @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template - -docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template - ./bin/registry-api-descriptor-template $< > $@ - -vet: - @echo "+ $@" - @go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -fmt: - @echo "+ $@" - @test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \ - (echo >&2 "+ please format Go code with 'gofmt -s'" && false) - -lint: - @echo "+ $@" - $(if $(GOLINT), , \ - $(error Please install golint: `go get -u github.com/golang/lint/golint`)) - @test -z "$$($(GOLINT) ./... 
2>&1 | grep -v ^vendor/ | tee /dev/stderr)" - -build: - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS) - -test: - @echo "+ $@" - @go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -test-full: - @echo "+ $@" - @go test -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template - @echo "+ $@" - -clean: - @echo "+ $@" - @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template" - -dep-validate: - @echo "+ $@" - $(if $(VNDR), , \ - $(error Please install vndr: go get github.com/lk4d4/vndr)) - @rm -Rf .vendor.bak - @mv vendor .vendor.bak - @$(VNDR) - @test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \ - (echo >&2 "+ inconsistent dependencies! what you have in vendor.conf does not match with what you have in vendor" && false) - @rm -Rf vendor - @mv .vendor.bak vendor diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md deleted file mode 100644 index 99887885..00000000 --- a/vendor/github.com/docker/distribution/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# Distribution - -The Docker toolset to pack, ship, store, and deliver content. - -This repository's main product is the Docker Registry 2.0 implementation -for storing and distributing Docker images. It supersedes the -[docker/docker-registry](https://github.com/docker/docker-registry) -project with a new API design, focused around security and performance. - - - -[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master) -[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution) - -This repository contains the following components: - -|**Component** |Description | -|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | -| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | -| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | -| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. | - -### How does this integrate with Docker engine? - -This project should provide an implementation to a V2 API for use in the [Docker -core project](https://github.com/docker/docker). The API should be embeddable -and simplify the process of securely pulling and pushing content from `docker` -daemons. - -### What are the long term goals of the Distribution project? - -The _Distribution_ project has the further long term goal of providing a -secure tool chain for distributing content. The specifications, APIs and tools -should be as useful with Docker as they are without. 
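
(As an illustrative aside on the library component listed above: a minimal sketch of normalizing an image name with the vendored `github.com/docker/distribution/reference` package, assuming its `ParseNormalizedNamed`, `TagNameOnly`, and `FamiliarString` helpers; this is a sketch for context, not an upstream example.)

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Expand the familiar short name into a fully qualified repository name.
	named, err := reference.ParseNormalizedNamed("debian")
	if err != nil {
		panic(err)
	}
	// Apply the default "latest" tag when none was given.
	tagged := reference.TagNameOnly(named)

	fmt.Println(named.Name())                     // docker.io/library/debian
	fmt.Println(reference.FamiliarString(tagged)) // debian:latest
}
```
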
- -Our goal is to design a professional grade and extensible content distribution -system that allow users to: - -* Enjoy an efficient, secured and reliable way to store, manage, package and - exchange content -* Hack/roll their own on top of healthy open-source components -* Implement their own home made solution through good specs, and solid - extensions mechanism. - -## More about Registry 2.0 - -The new registry implementation provides the following benefits: - -- faster push and pull -- new, more efficient implementation -- simplified deployment -- pluggable storage backend -- webhook notifications - -For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md). - -### Who needs to deploy a registry? - -By default, Docker users pull images from Docker's public registry instance. -[Installing Docker](https://docs.docker.com/engine/installation/) gives users this -ability. Users can also push images to a repository on Docker's public registry, -if they have a [Docker Hub](https://hub.docker.com/) account. - -For some users and even companies, this default behavior is sufficient. For -others, it is not. - -For example, users with their own software products may want to maintain a -registry for private, company images. Also, you may wish to deploy your own -image repository for images used to test or in continuous integration. For these -use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md) -may be the better choice. - -### Migration to Registry 2.0 - -For those who have previously deployed their own registry based on the Registry -1.0 implementation and wish to deploy a Registry 2.0 while retaining images, -data migration is required. A tool to assist with migration efforts has been -created. For more information see [docker/migrator](https://github.com/docker/migrator). - -## Contribute - -Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute -issues, fixes, and patches to this project. If you are contributing code, see -the instructions for [building a development environment](BUILDING.md). - -## Support - -If any issues are encountered while using the _Distribution_ project, several -avenues are available for support: - - - - - - - - - - - - - - - - - - -
- IRC - - #docker-distribution on FreeNode -
- Issue Tracker - - github.com/docker/distribution/issues -
- Google Groups - - https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution -
- Mailing List - - docker@dockerproject.org -
- - -## License - -This project is distributed under [Apache License, Version 2.0](LICENSE). diff --git a/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md b/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md deleted file mode 100644 index 73eba5a8..00000000 --- a/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md +++ /dev/null @@ -1,44 +0,0 @@ -## Registry Release Checklist - -10. Compile release notes detailing features and since the last release. - - Update the `CHANGELOG.md` file and create a PR to master with the updates. -Once that PR has been approved by maintainers the change may be cherry-picked -to the release branch (new release branches may be forked from this commit). - -20. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go` - -30. Update the `MAINTAINERS` (if necessary), `AUTHORS` and `.mailmap` files. - -``` -make AUTHORS -``` - -40. Create a signed tag. - - Distribution uses semantic versioning. Tags are of the format -`vx.y.z[-rcn]`. You will need PGP installed and a PGP key which has been added -to your Github account. The comment for the tag should include the release -notes, use previous tags as a guide for formatting consistently. Run -`git tag -s vx.y.z[-rcn]` to create tag and `git -v vx.y.z[-rcn]` to verify tag, -check comment and correct commit hash. - -50. Push the signed tag - -60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox. - -70. Update the registry binary in [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request. - -80. Update the official image. Add the new version in the [official images repo](https://github.com/docker-library/official-images) by appending a new version to the `registry/registry` file with the git hash pointed to by the signed tag. Update the major version to point to the latest version and the minor version to point to new patch release if necessary. -e.g. to release `2.3.1` - - `2.3.1 (new)` - - `2.3.0 -> 2.3.0` can be removed - - `2 -> 2.3.1` - - `2.3 -> 2.3.1` - -90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images. - diff --git a/vendor/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md deleted file mode 100644 index 701127af..00000000 --- a/vendor/github.com/docker/distribution/ROADMAP.md +++ /dev/null @@ -1,267 +0,0 @@ -# Roadmap - -The Distribution Project consists of several components, some of which are -still being defined. This document defines the high-level goals of the -project, identifies the current components, and defines the release- -relationship to the Docker Platform. - -* [Distribution Goals](#distribution-goals) -* [Distribution Components](#distribution-components) -* [Project Planning](#project-planning): release-relationship to the Docker Platform. - -This road map is a living document, providing an overview of the goals and -considerations made in respect of the future of the project. - -## Distribution Goals - -- Replace the existing [docker registry](github.com/docker/docker-registry) - implementation as the primary implementation. -- Replace the existing push and pull code in the docker engine with the - distribution package. 
-- Define a strong data model for distributing docker images -- Provide a flexible distribution tool kit for use in the docker platform -- Unlock new distribution models - -## Distribution Components - -Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming -features and bugfixes for a component will be added to the relevant milestone. If a feature or -bugfix is not part of a milestone, it is currently unscheduled for -implementation. - -* [Registry](#registry) -* [Distribution Package](#distribution-package) - -*** - -### Registry - -The new Docker registry is the main portion of the distribution repository. -Registry 2.0 is the first release of the next-generation registry. This was -primarily focused on implementing the [new registry -API](https://github.com/docker/distribution/blob/master/docs/spec/api.md), -with a focus on security and performance. - -Following from the Distribution project goals above, we have a set of goals -for registry v2 that we would like to follow in the design. New features -should be compared against these goals. - -#### Data Storage and Distribution First - -The registry's first goal is to provide a reliable, consistent storage -location for Docker images. The registry should only provide the minimal -amount of indexing required to fetch image data and no more. - -This means we should be selective in new features and API additions, including -those that may require expensive, ever growing indexes. Requests should be -servable in "constant time". - -#### Content Addressability - -All data objects used in the registry API should be content addressable. -Content identifiers should be secure and verifiable. This provides a secure, -reliable base from which to build more advanced content distribution systems. - -#### Content Agnostic - -In the past, changes to the image format would require large changes in Docker -and the Registry. By decoupling the distribution and image format, we can -allow the formats to progress without having to coordinate between the two. -This means that we should be focused on decoupling Docker from the registry -just as much as decoupling the registry from Docker. Such an approach will -allow us to unlock new distribution models that haven't been possible before. - -We can take this further by saying that the new registry should be content -agnostic. The registry provides a model of names, tags, manifests and content -addresses and that model can be used to work with content. - -#### Simplicity - -The new registry should be closer to a microservice component than its -predecessor. This means it should have a narrower API and a low number of -service dependencies. It should be easy to deploy. - -This means that other solutions should be explored before changing the API or -adding extra dependencies. If functionality is required, can it be added as an -extension or companion service. - -#### Extensibility - -The registry should provide extension points to add functionality. By keeping -the scope narrow, but providing the ability to add functionality. - -Features like search, indexing, synchronization and registry explorers fall -into this category. No such feature should be added unless we've found it -impossible to do through an extension. - -#### Active Feature Discussions - -The following are feature discussions that are currently active. 
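
(To make the content-addressability goal above concrete, a minimal illustrative sketch of computing and verifying a content address with the `github.com/opencontainers/go-digest` package, assuming its `FromBytes` and `Verifier` APIs; a sketch for context only.)

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/opencontainers/go-digest"
)

func main() {
	blob := []byte("some layer content")

	// The content address is simply the hash of the bytes, e.g. "sha256:...".
	dgst := digest.FromBytes(blob)
	fmt.Println("address:", dgst)

	// Anyone holding the digest can verify bytes received from an untrusted source.
	verifier := dgst.Verifier()
	if _, err := io.Copy(verifier, bytes.NewReader(blob)); err != nil {
		panic(err)
	}
	fmt.Println("verified:", verifier.Verified())
}
```
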
- -If you don't see your favorite, unimplemented feature, feel free to contact us -via IRC or the mailing list and we can talk about adding it. The goal here is -to make sure that new features go through a rigid design process before -landing in the registry. - -##### Proxying to other Registries - -A _pull-through caching_ mode exists for the registry, but is restricted from -within the docker client to only mirror the official Docker Hub. This functionality -can be expanded when image provenance has been specified and implemented in the -distribution project. - -##### Metadata storage - -Metadata for the registry is currently stored with the manifest and layer data on -the storage backend. While this is a big win for simplicity and reliably maintaining -state, it comes with the cost of consistency and high latency. The mutable registry -metadata operations should be abstracted behind an API which will allow ACID compliant -storage systems to handle metadata. - -##### Peer to Peer transfer - -Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit - -##### Indexing, Search and Discovery - -The original registry provided some implementation of search for use with -private registries. Support has been elided from V2 since we'd like to both -decouple search functionality from the registry. The makes the registry -simpler to deploy, especially in use cases where search is not needed, and -let's us decouple the image format from the registry. - -There are explorations into using the catalog API and notification system to -build external indexes. The current line of thought is that we will define a -common search API to index and query docker images. Such a system could be run -as a companion to a registry or set of registries to power discovery. - -The main issue with search and discovery is that there are so many ways to -accomplish it. There are two aspects to this project. The first is deciding on -how it will be done, including an API definition that can work with changing -data formats. The second is the process of integrating with `docker search`. -We expect that someone attempts to address the problem with the existing tools -and propose it as a standard search API or uses it to inform a standardization -process. Once this has been explored, we integrate with the docker client. - -Please see the following for more detail: - -- https://github.com/docker/distribution/issues/206 - -##### Deletes - -> __NOTE:__ Deletes are a much asked for feature. Before requesting this -feature or participating in discussion, we ask that you read this section in -full and understand the problems behind deletes. - -While, at first glance, implementing deleting seems simple, there are a number -mitigating factors that make many solutions not ideal or even pathological in -the context of a registry. The following paragraph discuss the background and -approaches that could be applied to arrive at a solution. - -The goal of deletes in any system is to remove unused or unneeded data. Only -data requested for deletion should be removed and no other data. Removing -unintended data is worse than _not_ removing data that was requested for -removal but ideally, both are supported. Generally, according to this rule, we -err on holding data longer than needed, ensuring that it is only removed when -we can be certain that it can be removed. With the current behavior, we opt to -hold onto the data forever, ensuring that data cannot be incorrectly removed. 
- -To understand the problems with implementing deletes, one must understand the -data model. All registry data is stored in a filesystem layout, implemented on -a "storage driver", effectively a _virtual file system_ (VFS). The storage -system must assume that this VFS layer will be eventually consistent and has -poor read- after-write consistency, since this is the lower common denominator -among the storage drivers. This is mitigated by writing values in reverse- -dependent order, but makes wider transactional operations unsafe. - -Layered on the VFS model is a content-addressable _directed, acyclic graph_ -(DAG) made up of blobs. Manifests reference layers. Tags reference manifests. -Since the same data can be referenced by multiple manifests, we only store -data once, even if it is in different repositories. Thus, we have a set of -blobs, referenced by tags and manifests. If we want to delete a blob we need -to be certain that it is no longer referenced by another manifest or tag. When -we delete a manifest, we also can try to delete the referenced blobs. Deciding -whether or not a blob has an active reference is the crux of the problem. - -Conceptually, deleting a manifest and its resources is quite simple. Just find -all the manifests, enumerate the referenced blobs and delete the blobs not in -that set. An astute observer will recognize this as a garbage collection -problem. As with garbage collection in programming languages, this is very -simple when one always has a consistent view. When one adds parallelism and an -inconsistent view of data, it becomes very challenging. - -A simple example can demonstrate this. Let's say we are deleting a manifest -_A_ in one process. We scan the manifest and decide that all the blobs are -ready for deletion. Concurrently, we have another process accepting a new -manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_ -is accepted and all the blobs are considered present, so the operation -proceeds. The original process then deletes the referenced blobs, assuming -they were unreferenced. The manifest _B_, which we thought had all of its data -present, can no longer be served by the registry, since the dependent data has -been deleted. - -Deleting data from the registry safely requires some way to coordinate this -operation. The following approaches are being considered: - -- _Reference Counting_ - Maintain a count of references to each blob. This is - challenging for a number of reasons: 1. maintaining a consistent consensus - of reference counts across a set of Registries and 2. Building the initial - list of reference counts for an existing registry. These challenges can be - met with a consensus protocol like Paxos or Raft in the first case and a - necessary but simple scan in the second.. -- _Lock the World GC_ - Halt all writes to the data store. Walk the data store - and find all blob references. Delete all unreferenced blobs. This approach - is very simple but requires disabling writes for a period of time while the - service reads all data. This is slow and expensive but very accurate and - effective. -- _Generational GC_ - Do something similar to above but instead of blocking - writes, writes are sent to another storage backend while reads are broadcast - to the new and old backends. GC is then performed on the read-only portion. - Because writes land in the new backend, the data in the read-only section - can be safely deleted. The main drawbacks of this approach are complexity - and coordination. 
-- _Centralized Oracle_ - Using a centralized, transactional database, we can - know exactly which data is referenced at any given time. This avoids - coordination problem by managing this data in a single location. We trade - off metadata scalability for simplicity and performance. This is a very good - option for most registry deployments. This would create a bottleneck for - registry metadata. However, metadata is generally not the main bottleneck - when serving images. - -Please let us know if other solutions exist that we have yet to enumerate. -Note that for any approach, implementation is a massive consideration. For -example, a mark-sweep based solution may seem simple but the amount of work in -coordination offset the extra work it might take to build a _Centralized -Oracle_. We'll accept proposals for any solution but please coordinate with us -before dropping code. - -At this time, we have traded off simplicity and ease of deployment for disk -space. Simplicity and ease of deployment tend to reduce developer involvement, -which is currently the most expensive resource in software engineering. Taking -on any solution for deletes will greatly effect these factors, trading off -very cheap disk space for a complex deployment and operational story. - -Please see the following issues for more detail: - -- https://github.com/docker/distribution/issues/422 -- https://github.com/docker/distribution/issues/461 -- https://github.com/docker/distribution/issues/462 - -### Distribution Package - -At its core, the Distribution Project is a set of Go packages that make up -Distribution Components. At this time, most of these packages make up the -Registry implementation. - -The package itself is considered unstable. If you're using it, please take care to vendor the dependent version. - -For feature additions, please see the Registry section. In the future, we may break out a -separate Roadmap for distribution-specific features that apply to more than -just the registry. - -*** - -### Project Planning - -An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress. - diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go deleted file mode 100644 index 145b0785..00000000 --- a/vendor/github.com/docker/distribution/blobs.go +++ /dev/null @@ -1,257 +0,0 @@ -package distribution - -import ( - "context" - "errors" - "fmt" - "io" - "net/http" - "time" - - "github.com/docker/distribution/reference" - "github.com/opencontainers/go-digest" -) - -var ( - // ErrBlobExists returned when blob already exists - ErrBlobExists = errors.New("blob exists") - - // ErrBlobDigestUnsupported when blob digest is an unsupported version. - ErrBlobDigestUnsupported = errors.New("unsupported blob digest") - - // ErrBlobUnknown when blob is not found. - ErrBlobUnknown = errors.New("unknown blob") - - // ErrBlobUploadUnknown returned when upload is not found. - ErrBlobUploadUnknown = errors.New("blob upload unknown") - - // ErrBlobInvalidLength returned when the blob has an expected length on - // commit, meaning mismatched with the descriptor or an invalid value. - ErrBlobInvalidLength = errors.New("blob invalid length") -) - -// ErrBlobInvalidDigest returned when digest check fails. 
-type ErrBlobInvalidDigest struct { - Digest digest.Digest - Reason error -} - -func (err ErrBlobInvalidDigest) Error() string { - return fmt.Sprintf("invalid digest for referenced layer: %v, %v", - err.Digest, err.Reason) -} - -// ErrBlobMounted returned when a blob is mounted from another repository -// instead of initiating an upload session. -type ErrBlobMounted struct { - From reference.Canonical - Descriptor Descriptor -} - -func (err ErrBlobMounted) Error() string { - return fmt.Sprintf("blob mounted from: %v to: %v", - err.From, err.Descriptor) -} - -// Descriptor describes targeted content. Used in conjunction with a blob -// store, a descriptor can be used to fetch, store and target any kind of -// blob. The struct also describes the wire protocol format. Fields should -// only be added but never changed. -type Descriptor struct { - // MediaType describe the type of the content. All text based formats are - // encoded as utf-8. - MediaType string `json:"mediaType,omitempty"` - - // Size in bytes of content. - Size int64 `json:"size,omitempty"` - - // Digest uniquely identifies the content. A byte stream can be verified - // against against this digest. - Digest digest.Digest `json:"digest,omitempty"` - - // URLs contains the source URLs of this content. - URLs []string `json:"urls,omitempty"` - - // NOTE: Before adding a field here, please ensure that all - // other options have been exhausted. Much of the type relationships - // depend on the simplicity of this type. -} - -// Descriptor returns the descriptor, to make it satisfy the Describable -// interface. Note that implementations of Describable are generally objects -// which can be described, not simply descriptors; this exception is in place -// to make it more convenient to pass actual descriptors to functions that -// expect Describable objects. -func (d Descriptor) Descriptor() Descriptor { - return d -} - -// BlobStatter makes blob descriptors available by digest. The service may -// provide a descriptor of a different digest if the provided digest is not -// canonical. -type BlobStatter interface { - // Stat provides metadata about a blob identified by the digest. If the - // blob is unknown to the describer, ErrBlobUnknown will be returned. - Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error) -} - -// BlobDeleter enables deleting blobs from storage. -type BlobDeleter interface { - Delete(ctx context.Context, dgst digest.Digest) error -} - -// BlobEnumerator enables iterating over blobs from storage -type BlobEnumerator interface { - Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error -} - -// BlobDescriptorService manages metadata about a blob by digest. Most -// implementations will not expose such an interface explicitly. Such mappings -// should be maintained by interacting with the BlobIngester. Hence, this is -// left off of BlobService and BlobStore. -type BlobDescriptorService interface { - BlobStatter - - // SetDescriptor assigns the descriptor to the digest. The provided digest and - // the digest in the descriptor must map to identical content but they may - // differ on their algorithm. The descriptor must have the canonical - // digest of the content and the digest algorithm must match the - // annotators canonical algorithm. - // - // Such a facility can be used to map blobs between digest domains, with - // the restriction that the algorithm of the descriptor must match the - // canonical algorithm (ie sha256) of the annotator. 
- SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error - - // Clear enables descriptors to be unlinked - Clear(ctx context.Context, dgst digest.Digest) error -} - -// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService. -type BlobDescriptorServiceFactory interface { - BlobAccessController(svc BlobDescriptorService) BlobDescriptorService -} - -// ReadSeekCloser is the primary reader type for blob data, combining -// io.ReadSeeker with io.Closer. -type ReadSeekCloser interface { - io.ReadSeeker - io.Closer -} - -// BlobProvider describes operations for getting blob data. -type BlobProvider interface { - // Get returns the entire blob identified by digest along with the descriptor. - Get(ctx context.Context, dgst digest.Digest) ([]byte, error) - - // Open provides a ReadSeekCloser to the blob identified by the provided - // descriptor. If the blob is not known to the service, an error will be - // returned. - Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error) -} - -// BlobServer can serve blobs via http. -type BlobServer interface { - // ServeBlob attempts to serve the blob, identified by dgst, via http. The - // service may decide to redirect the client elsewhere or serve the data - // directly. - // - // This handler only issues successful responses, such as 2xx or 3xx, - // meaning it serves data or issues a redirect. If the blob is not - // available, an error will be returned and the caller may still issue a - // response. - // - // The implementation may serve the same blob from a different digest - // domain. The appropriate headers will be set for the blob, unless they - // have already been set by the caller. - ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error -} - -// BlobIngester ingests blob data. -type BlobIngester interface { - // Put inserts the content p into the blob service, returning a descriptor - // or an error. - Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error) - - // Create allocates a new blob writer to add a blob to this service. The - // returned handle can be written to and later resumed using an opaque - // identifier. With this approach, one can Close and Resume a BlobWriter - // multiple times until the BlobWriter is committed or cancelled. - Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error) - - // Resume attempts to resume a write to a blob, identified by an id. - Resume(ctx context.Context, id string) (BlobWriter, error) -} - -// BlobCreateOption is a general extensible function argument for blob creation -// methods. A BlobIngester may choose to honor any or none of the given -// BlobCreateOptions, which can be specific to the implementation of the -// BlobIngester receiving them. -// TODO (brianbland): unify this with ManifestServiceOption in the future -type BlobCreateOption interface { - Apply(interface{}) error -} - -// CreateOptions is a collection of blob creation modifiers relevant to general -// blob storage intended to be configured by the BlobCreateOption.Apply method. -type CreateOptions struct { - Mount struct { - ShouldMount bool - From reference.Canonical - // Stat allows to pass precalculated descriptor to link and return. - // Blob access check will be skipped if set. - Stat *Descriptor - } -} - -// BlobWriter provides a handle for inserting data into a blob store. -// Instances should be obtained from BlobWriteService.Writer and -// BlobWriteService.Resume. 
If supported by the store, a writer can be -// recovered with the id. -type BlobWriter interface { - io.WriteCloser - io.ReaderFrom - - // Size returns the number of bytes written to this blob. - Size() int64 - - // ID returns the identifier for this writer. The ID can be used with the - // Blob service to later resume the write. - ID() string - - // StartedAt returns the time this blob write was started. - StartedAt() time.Time - - // Commit completes the blob writer process. The content is verified - // against the provided provisional descriptor, which may result in an - // error. Depending on the implementation, written data may be validated - // against the provisional descriptor fields. If MediaType is not present, - // the implementation may reject the commit or assign "application/octet- - // stream" to the blob. The returned descriptor may have a different - // digest depending on the blob store, referred to as the canonical - // descriptor. - Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error) - - // Cancel ends the blob write without storing any data and frees any - // associated resources. Any data written thus far will be lost. Cancel - // implementations should allow multiple calls even after a commit that - // result in a no-op. This allows use of Cancel in a defer statement, - // increasing the assurance that it is correctly called. - Cancel(ctx context.Context) error -} - -// BlobService combines the operations to access, read and write blobs. This -// can be used to describe remote blob services. -type BlobService interface { - BlobStatter - BlobProvider - BlobIngester -} - -// BlobStore represent the entire suite of blob related operations. Such an -// implementation can access, read, write, delete and serve blobs. -type BlobStore interface { - BlobService - BlobServer - BlobDeleter -} diff --git a/vendor/github.com/docker/distribution/circle.yml b/vendor/github.com/docker/distribution/circle.yml deleted file mode 100644 index 085ef4f7..00000000 --- a/vendor/github.com/docker/distribution/circle.yml +++ /dev/null @@ -1,94 +0,0 @@ -# Pony-up! 
-machine: - pre: - # Install gvm - - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) - # Install codecov for coverage - - pip install --user codecov - - post: - # go - - gvm install go1.10 --prefer-binary --name=stable - - environment: - # Convenient shortcuts to "common" locations - CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME - BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME - # Trick circle brainflat "no absolute path" behavior - BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR - DOCKER_BUILDTAGS: "include_oss include_gcs" - # Workaround Circle parsing dumb bugs and/or YAML wonkyness - CIRCLE_PAIN: "mode: set" - - hosts: - # Not used yet - fancy: 127.0.0.1 - -dependencies: - pre: - # Copy the code to the gopath of all go versions - - > - gvm use stable && - mkdir -p "$(dirname $BASE_STABLE)" && - cp -R "$CHECKOUT" "$BASE_STABLE" - - override: - # Install dependencies for every copied clone/go version - - gvm use stable && go get github.com/lk4d4/vndr: - pwd: $BASE_STABLE - - post: - # For the stable go version, additionally install linting tools - - > - gvm use stable && - go get github.com/axw/gocov/gocov github.com/golang/lint/golint - -test: - pre: - # Output the go versions we are going to test - # - gvm use old && go version - - gvm use stable && go version - - # Ensure validation of dependencies - - git fetch origin: - pwd: $BASE_STABLE - - gvm use stable && if test -n "`git diff --stat=1000 origin/master | grep -E \"^[[:space:]]*vendor\"`"; then make dep-validate; fi: - pwd: $BASE_STABLE - - # First thing: build everything. This will catch compile errors, and it's - # also necessary for go vet to work properly (see #807). - - gvm use stable && go install $(go list ./... | grep -v "/vendor/"): - pwd: $BASE_STABLE - - # FMT - - gvm use stable && make fmt: - pwd: $BASE_STABLE - - # VET - - gvm use stable && make vet: - pwd: $BASE_STABLE - - # LINT - - gvm use stable && make lint: - pwd: $BASE_STABLE - - override: - # Test stable, and report - - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': - timeout: 1000 - pwd: $BASE_STABLE - - # Test stable with race - - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | grep -v "registry/handlers" | grep -v "registry/storage/driver" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -race -tags "$DOCKER_BUILDTAGS" -test.short $PACKAGE': - timeout: 1000 - pwd: $BASE_STABLE - post: - # Report to codecov - - bash <(curl -s https://codecov.io/bash): - pwd: $BASE_STABLE - - ## Notes - # Do we want these as well? - # - go get code.google.com/p/go.tools/cmd/goimports - # - test -z "$(goimports -l -w ./... 
| tee /dev/stderr)" - # http://labix.org/gocheck diff --git a/vendor/github.com/docker/distribution/coverpkg.sh b/vendor/github.com/docker/distribution/coverpkg.sh deleted file mode 100755 index 25d419ae..00000000 --- a/vendor/github.com/docker/distribution/coverpkg.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash -# Given a subpackage and the containing package, figures out which packages -# need to be passed to `go test -coverpkg`: this includes all of the -# subpackage's dependencies within the containing package, as well as the -# subpackage itself. -DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)" -echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ',' diff --git a/vendor/github.com/docker/distribution/digestset/BUILD.bazel b/vendor/github.com/docker/distribution/digestset/BUILD.bazel deleted file mode 100644 index b594fe7a..00000000 --- a/vendor/github.com/docker/distribution/digestset/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = ["set.go"], - importpath = "github.com/docker/distribution/digestset", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/opencontainers/go-digest:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = ["set_test.go"], - embed = [":go_default_library"], - importpath = "github.com/docker/distribution/digestset", - deps = ["//vendor/github.com/opencontainers/go-digest:go_default_library"], -) diff --git a/vendor/github.com/docker/distribution/digestset/set_test.go b/vendor/github.com/docker/distribution/digestset/set_test.go deleted file mode 100644 index 67d46c62..00000000 --- a/vendor/github.com/docker/distribution/digestset/set_test.go +++ /dev/null @@ -1,371 +0,0 @@ -package digestset - -import ( - "crypto/sha256" - _ "crypto/sha512" - "encoding/binary" - "math/rand" - "testing" - - digest "github.com/opencontainers/go-digest" -) - -func assertEqualDigests(t *testing.T, d1, d2 digest.Digest) { - if d1 != d2 { - t.Fatalf("Digests do not match:\n\tActual: %s\n\tExpected: %s", d1, d2) - } -} - -func TestLookup(t *testing.T) { - digests := []digest.Digest{ - "sha256:1234511111111111111111111111111111111111111111111111111111111111", - "sha256:1234111111111111111111111111111111111111111111111111111111111111", - "sha256:1234611111111111111111111111111111111111111111111111111111111111", - "sha256:5432111111111111111111111111111111111111111111111111111111111111", - "sha256:6543111111111111111111111111111111111111111111111111111111111111", - "sha256:6432111111111111111111111111111111111111111111111111111111111111", - "sha256:6542111111111111111111111111111111111111111111111111111111111111", - "sha256:6532111111111111111111111111111111111111111111111111111111111111", - } - - dset := NewSet() - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - t.Fatal(err) - } - } - - dgst, err := dset.Lookup("54") - if err != nil { - t.Fatal(err) - } - assertEqualDigests(t, dgst, digests[3]) - - dgst, err = dset.Lookup("1234") - if err == nil { - t.Fatal("Expected ambiguous error looking up: 1234") - } - if err != ErrDigestAmbiguous { - t.Fatal(err) - } - - dgst, err = dset.Lookup("9876") - if err == nil { - t.Fatal("Expected not found error looking up: 9876") - } - if err != ErrDigestNotFound { - t.Fatal(err) - } - - dgst, err = dset.Lookup("sha256:1234") - if err == nil { - t.Fatal("Expected ambiguous error 
looking up: sha256:1234") - } - if err != ErrDigestAmbiguous { - t.Fatal(err) - } - - dgst, err = dset.Lookup("sha256:12345") - if err != nil { - t.Fatal(err) - } - assertEqualDigests(t, dgst, digests[0]) - - dgst, err = dset.Lookup("sha256:12346") - if err != nil { - t.Fatal(err) - } - assertEqualDigests(t, dgst, digests[2]) - - dgst, err = dset.Lookup("12346") - if err != nil { - t.Fatal(err) - } - assertEqualDigests(t, dgst, digests[2]) - - dgst, err = dset.Lookup("12345") - if err != nil { - t.Fatal(err) - } - assertEqualDigests(t, dgst, digests[0]) -} - -func TestAddDuplication(t *testing.T) { - digests := []digest.Digest{ - "sha256:1234111111111111111111111111111111111111111111111111111111111111", - "sha256:1234511111111111111111111111111111111111111111111111111111111111", - "sha256:1234611111111111111111111111111111111111111111111111111111111111", - "sha256:5432111111111111111111111111111111111111111111111111111111111111", - "sha256:6543111111111111111111111111111111111111111111111111111111111111", - "sha512:65431111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", - "sha512:65421111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", - "sha512:65321111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", - } - - dset := NewSet() - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - t.Fatal(err) - } - } - - if len(dset.entries) != 8 { - t.Fatal("Invalid dset size") - } - - if err := dset.Add(digest.Digest("sha256:1234511111111111111111111111111111111111111111111111111111111111")); err != nil { - t.Fatal(err) - } - - if len(dset.entries) != 8 { - t.Fatal("Duplicate digest insert should not increase entries size") - } - - if err := dset.Add(digest.Digest("sha384:123451111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")); err != nil { - t.Fatal(err) - } - - if len(dset.entries) != 9 { - t.Fatal("Insert with different algorithm should be allowed") - } -} - -func TestRemove(t *testing.T) { - digests, err := createDigests(10) - if err != nil { - t.Fatal(err) - } - - dset := NewSet() - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - t.Fatal(err) - } - } - - dgst, err := dset.Lookup(digests[0].String()) - if err != nil { - t.Fatal(err) - } - if dgst != digests[0] { - t.Fatalf("Unexpected digest value:\n\tExpected: %s\n\tActual: %s", digests[0], dgst) - } - - if err := dset.Remove(digests[0]); err != nil { - t.Fatal(err) - } - - if _, err := dset.Lookup(digests[0].String()); err != ErrDigestNotFound { - t.Fatalf("Expected error %v when looking up removed digest, got %v", ErrDigestNotFound, err) - } -} - -func TestAll(t *testing.T) { - digests, err := createDigests(100) - if err != nil { - t.Fatal(err) - } - - dset := NewSet() - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - t.Fatal(err) - } - } - - all := map[digest.Digest]struct{}{} - for _, dgst := range dset.All() { - all[dgst] = struct{}{} - } - - if len(all) != len(digests) { - t.Fatalf("Unexpected number of unique digests found:\n\tExpected: %d\n\tActual: %d", len(digests), len(all)) - } - - for i, dgst := range digests { - if _, ok := all[dgst]; !ok { - t.Fatalf("Missing element at position %d: %s", i, dgst) - } - } - -} - -func assertEqualShort(t *testing.T, actual, expected string) { - if actual != expected { - 
t.Fatalf("Unexpected short value:\n\tExpected: %s\n\tActual: %s", expected, actual) - } -} - -func TestShortCodeTable(t *testing.T) { - digests := []digest.Digest{ - "sha256:1234111111111111111111111111111111111111111111111111111111111111", - "sha256:1234511111111111111111111111111111111111111111111111111111111111", - "sha256:1234611111111111111111111111111111111111111111111111111111111111", - "sha256:5432111111111111111111111111111111111111111111111111111111111111", - "sha256:6543111111111111111111111111111111111111111111111111111111111111", - "sha256:6432111111111111111111111111111111111111111111111111111111111111", - "sha256:6542111111111111111111111111111111111111111111111111111111111111", - "sha256:6532111111111111111111111111111111111111111111111111111111111111", - } - - dset := NewSet() - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - t.Fatal(err) - } - } - - dump := ShortCodeTable(dset, 2) - - if len(dump) < len(digests) { - t.Fatalf("Error unexpected size: %d, expecting %d", len(dump), len(digests)) - } - assertEqualShort(t, dump[digests[0]], "12341") - assertEqualShort(t, dump[digests[1]], "12345") - assertEqualShort(t, dump[digests[2]], "12346") - assertEqualShort(t, dump[digests[3]], "54") - assertEqualShort(t, dump[digests[4]], "6543") - assertEqualShort(t, dump[digests[5]], "64") - assertEqualShort(t, dump[digests[6]], "6542") - assertEqualShort(t, dump[digests[7]], "653") -} - -func createDigests(count int) ([]digest.Digest, error) { - r := rand.New(rand.NewSource(25823)) - digests := make([]digest.Digest, count) - for i := range digests { - h := sha256.New() - if err := binary.Write(h, binary.BigEndian, r.Int63()); err != nil { - return nil, err - } - digests[i] = digest.NewDigest("sha256", h) - } - return digests, nil -} - -func benchAddNTable(b *testing.B, n int) { - digests, err := createDigests(n) - if err != nil { - b.Fatal(err) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} - for j := range digests { - if err = dset.Add(digests[j]); err != nil { - b.Fatal(err) - } - } - } -} - -func benchLookupNTable(b *testing.B, n int, shortLen int) { - digests, err := createDigests(n) - if err != nil { - b.Fatal(err) - } - dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - b.Fatal(err) - } - } - shorts := make([]string, 0, n) - for _, short := range ShortCodeTable(dset, shortLen) { - shorts = append(shorts, short) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - if _, err = dset.Lookup(shorts[i%n]); err != nil { - b.Fatal(err) - } - } -} - -func benchRemoveNTable(b *testing.B, n int) { - digests, err := createDigests(n) - if err != nil { - b.Fatal(err) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} - b.StopTimer() - for j := range digests { - if err = dset.Add(digests[j]); err != nil { - b.Fatal(err) - } - } - b.StartTimer() - for j := range digests { - if err = dset.Remove(digests[j]); err != nil { - b.Fatal(err) - } - } - } -} - -func benchShortCodeNTable(b *testing.B, n int, shortLen int) { - digests, err := createDigests(n) - if err != nil { - b.Fatal(err) - } - dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} - for i := range digests { - if err := dset.Add(digests[i]); err != nil { - b.Fatal(err) - } - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - ShortCodeTable(dset, shortLen) - } -} - -func 
BenchmarkAdd10(b *testing.B) { - benchAddNTable(b, 10) -} - -func BenchmarkAdd100(b *testing.B) { - benchAddNTable(b, 100) -} - -func BenchmarkAdd1000(b *testing.B) { - benchAddNTable(b, 1000) -} - -func BenchmarkRemove10(b *testing.B) { - benchRemoveNTable(b, 10) -} - -func BenchmarkRemove100(b *testing.B) { - benchRemoveNTable(b, 100) -} - -func BenchmarkRemove1000(b *testing.B) { - benchRemoveNTable(b, 1000) -} - -func BenchmarkLookup10(b *testing.B) { - benchLookupNTable(b, 10, 12) -} - -func BenchmarkLookup100(b *testing.B) { - benchLookupNTable(b, 100, 12) -} - -func BenchmarkLookup1000(b *testing.B) { - benchLookupNTable(b, 1000, 12) -} - -func BenchmarkShortCode10(b *testing.B) { - benchShortCodeNTable(b, 10, 12) -} -func BenchmarkShortCode100(b *testing.B) { - benchShortCodeNTable(b, 100, 12) -} -func BenchmarkShortCode1000(b *testing.B) { - benchShortCodeNTable(b, 1000, 12) -} diff --git a/vendor/github.com/docker/distribution/doc.go b/vendor/github.com/docker/distribution/doc.go deleted file mode 100644 index bdd8cb70..00000000 --- a/vendor/github.com/docker/distribution/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package distribution will define the interfaces for the components of -// docker distribution. The goal is to allow users to reliably package, ship -// and store content related to docker images. -// -// This is currently a work in progress. More details are available in the -// README.md. -package distribution diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go deleted file mode 100644 index 020d3325..00000000 --- a/vendor/github.com/docker/distribution/errors.go +++ /dev/null @@ -1,115 +0,0 @@ -package distribution - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -// ErrAccessDenied is returned when an access to a requested resource is -// denied. -var ErrAccessDenied = errors.New("access denied") - -// ErrManifestNotModified is returned when a conditional manifest GetByTag -// returns nil due to the client indicating it has the latest version -var ErrManifestNotModified = errors.New("manifest not modified") - -// ErrUnsupported is returned when an unimplemented or unsupported action is -// performed -var ErrUnsupported = errors.New("operation unsupported") - -// ErrTagUnknown is returned if the given tag is not known by the tag service -type ErrTagUnknown struct { - Tag string -} - -func (err ErrTagUnknown) Error() string { - return fmt.Sprintf("unknown tag=%s", err.Tag) -} - -// ErrRepositoryUnknown is returned if the named repository is not known by -// the registry. -type ErrRepositoryUnknown struct { - Name string -} - -func (err ErrRepositoryUnknown) Error() string { - return fmt.Sprintf("unknown repository name=%s", err.Name) -} - -// ErrRepositoryNameInvalid should be used to denote an invalid repository -// name. Reason may set, indicating the cause of invalidity. -type ErrRepositoryNameInvalid struct { - Name string - Reason error -} - -func (err ErrRepositoryNameInvalid) Error() string { - return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason) -} - -// ErrManifestUnknown is returned if the manifest is not known by the -// registry. -type ErrManifestUnknown struct { - Name string - Tag string -} - -func (err ErrManifestUnknown) Error() string { - return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) -} - -// ErrManifestUnknownRevision is returned when a manifest cannot be found by -// revision within a repository. 
-type ErrManifestUnknownRevision struct { - Name string - Revision digest.Digest -} - -func (err ErrManifestUnknownRevision) Error() string { - return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) -} - -// ErrManifestUnverified is returned when the registry is unable to verify -// the manifest. -type ErrManifestUnverified struct{} - -func (ErrManifestUnverified) Error() string { - return "unverified manifest" -} - -// ErrManifestVerification provides a type to collect errors encountered -// during manifest verification. Currently, it accepts errors of all types, -// but it may be narrowed to those involving manifest verification. -type ErrManifestVerification []error - -func (errs ErrManifestVerification) Error() string { - var parts []string - for _, err := range errs { - parts = append(parts, err.Error()) - } - - return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) -} - -// ErrManifestBlobUnknown returned when a referenced blob cannot be found. -type ErrManifestBlobUnknown struct { - Digest digest.Digest -} - -func (err ErrManifestBlobUnknown) Error() string { - return fmt.Sprintf("unknown blob %v on manifest", err.Digest) -} - -// ErrManifestNameInvalid should be used to denote an invalid manifest -// name. Reason may set, indicating the cause of invalidity. -type ErrManifestNameInvalid struct { - Name string - Reason error -} - -func (err ErrManifestNameInvalid) Error() string { - return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason) -} diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go deleted file mode 100644 index 1816baea..00000000 --- a/vendor/github.com/docker/distribution/manifests.go +++ /dev/null @@ -1,125 +0,0 @@ -package distribution - -import ( - "context" - "fmt" - "mime" - - "github.com/opencontainers/go-digest" -) - -// Manifest represents a registry object specifying a set of -// references and an optional target -type Manifest interface { - // References returns a list of objects which make up this manifest. - // A reference is anything which can be represented by a - // distribution.Descriptor. These can consist of layers, resources or other - // manifests. - // - // While no particular order is required, implementations should return - // them from highest to lowest priority. For example, one might want to - // return the base layer before the top layer. - References() []Descriptor - - // Payload provides the serialized format of the manifest, in addition to - // the media type. - Payload() (mediaType string, payload []byte, err error) -} - -// ManifestBuilder creates a manifest allowing one to include dependencies. -// Instances can be obtained from a version-specific manifest package. Manifest -// specific data is passed into the function which creates the builder. -type ManifestBuilder interface { - // Build creates the manifest from his builder. - Build(ctx context.Context) (Manifest, error) - - // References returns a list of objects which have been added to this - // builder. The dependencies are returned in the order they were added, - // which should be from base to head. - References() []Descriptor - - // AppendReference includes the given object in the manifest after any - // existing dependencies. If the add fails, such as when adding an - // unsupported dependency, an error may be returned. - // - // The destination of the reference is dependent on the manifest type and - // the dependency type. 
- AppendReference(dependency Describable) error -} - -// ManifestService describes operations on image manifests. -type ManifestService interface { - // Exists returns true if the manifest exists. - Exists(ctx context.Context, dgst digest.Digest) (bool, error) - - // Get retrieves the manifest specified by the given digest - Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error) - - // Put creates or updates the given manifest returning the manifest digest - Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error) - - // Delete removes the manifest specified by the given digest. Deleting - // a manifest that doesn't exist will return ErrManifestNotFound - Delete(ctx context.Context, dgst digest.Digest) error -} - -// ManifestEnumerator enables iterating over manifests -type ManifestEnumerator interface { - // Enumerate calls ingester for each manifest. - Enumerate(ctx context.Context, ingester func(digest.Digest) error) error -} - -// Describable is an interface for descriptors -type Describable interface { - Descriptor() Descriptor -} - -// ManifestMediaTypes returns the supported media types for manifests. -func ManifestMediaTypes() (mediaTypes []string) { - for t := range mappings { - if t != "" { - mediaTypes = append(mediaTypes, t) - } - } - return -} - -// UnmarshalFunc implements manifest unmarshalling a given MediaType -type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) - -var mappings = make(map[string]UnmarshalFunc, 0) - -// UnmarshalManifest looks up manifest unmarshal functions based on -// MediaType -func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { - // Need to look up by the actual media type, not the raw contents of - // the header. Strip semicolons and anything following them. - var mediaType string - if ctHeader != "" { - var err error - mediaType, _, err = mime.ParseMediaType(ctHeader) - if err != nil { - return nil, Descriptor{}, err - } - } - - unmarshalFunc, ok := mappings[mediaType] - if !ok { - unmarshalFunc, ok = mappings[""] - if !ok { - return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType) - } - } - - return unmarshalFunc(p) -} - -// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. 
This -// should be called from specific -func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error { - if _, ok := mappings[mediaType]; ok { - return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType) - } - mappings[mediaType] = u - return nil -} diff --git a/vendor/github.com/docker/distribution/reference/BUILD.bazel b/vendor/github.com/docker/distribution/reference/BUILD.bazel deleted file mode 100644 index 0617e955..00000000 --- a/vendor/github.com/docker/distribution/reference/BUILD.bazel +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "helpers.go", - "normalize.go", - "reference.go", - "regexp.go", - ], - importpath = "github.com/docker/distribution/reference", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/docker/distribution/digestset:go_default_library", - "//vendor/github.com/opencontainers/go-digest:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "normalize_test.go", - "reference_test.go", - "regexp_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/docker/distribution/reference", - deps = [ - "//vendor/github.com/docker/distribution/digestset:go_default_library", - "//vendor/github.com/opencontainers/go-digest:go_default_library", - ], -) diff --git a/vendor/github.com/docker/distribution/reference/normalize_test.go b/vendor/github.com/docker/distribution/reference/normalize_test.go deleted file mode 100644 index a881972a..00000000 --- a/vendor/github.com/docker/distribution/reference/normalize_test.go +++ /dev/null @@ -1,625 +0,0 @@ -package reference - -import ( - "strconv" - "testing" - - "github.com/docker/distribution/digestset" - "github.com/opencontainers/go-digest" -) - -func TestValidateReferenceName(t *testing.T) { - validRepoNames := []string{ - "docker/docker", - "library/debian", - "debian", - "docker.io/docker/docker", - "docker.io/library/debian", - "docker.io/debian", - "index.docker.io/docker/docker", - "index.docker.io/library/debian", - "index.docker.io/debian", - "127.0.0.1:5000/docker/docker", - "127.0.0.1:5000/library/debian", - "127.0.0.1:5000/debian", - "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", - - // This test case was moved from invalid to valid since it is valid input - // when specified with a hostname, it removes the ambiguity from about - // whether the value is an identifier or repository name - "docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - } - invalidRepoNames := []string{ - "https://github.com/docker/docker", - "docker/Docker", - "-docker", - "-docker/docker", - "-docker.io/docker/docker", - "docker///docker", - "docker.io/docker/Docker", - "docker.io/docker///docker", - "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - } - - for _, name := range invalidRepoNames { - _, err := ParseNormalizedNamed(name) - if err == nil { - t.Fatalf("Expected invalid repo name for %q", name) - } - } - - for _, name := range validRepoNames { - _, err := ParseNormalizedNamed(name) - if err != nil { - t.Fatalf("Error parsing repo name %s, got: %q", name, err) - } - } -} - -func TestValidateRemoteName(t *testing.T) { - validRepositoryNames := []string{ - // Sanity check. - "docker/docker", - - // Allow 64-character non-hexadecimal names (hexadecimal names are forbidden). 
- "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", - - // Allow embedded hyphens. - "docker-rules/docker", - - // Allow multiple hyphens as well. - "docker---rules/docker", - - //Username doc and image name docker being tested. - "doc/docker", - - // single character names are now allowed. - "d/docker", - "jess/t", - - // Consecutive underscores. - "dock__er/docker", - } - for _, repositoryName := range validRepositoryNames { - _, err := ParseNormalizedNamed(repositoryName) - if err != nil { - t.Errorf("Repository name should be valid: %v. Error: %v", repositoryName, err) - } - } - - invalidRepositoryNames := []string{ - // Disallow capital letters. - "docker/Docker", - - // Only allow one slash. - "docker///docker", - - // Disallow 64-character hexadecimal. - "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - - // Disallow leading and trailing hyphens in namespace. - "-docker/docker", - "docker-/docker", - "-docker-/docker", - - // Don't allow underscores everywhere (as opposed to hyphens). - "____/____", - - "_docker/_docker", - - // Disallow consecutive periods. - "dock..er/docker", - "dock_.er/docker", - "dock-.er/docker", - - // No repository. - "docker/", - - //namespace too long - "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker", - } - for _, repositoryName := range invalidRepositoryNames { - if _, err := ParseNormalizedNamed(repositoryName); err == nil { - t.Errorf("Repository name should be invalid: %v", repositoryName) - } - } -} - -func TestParseRepositoryInfo(t *testing.T) { - type tcase struct { - RemoteName, FamiliarName, FullName, AmbiguousName, Domain string - } - - tcases := []tcase{ - { - RemoteName: "fooo/bar", - FamiliarName: "fooo/bar", - FullName: "docker.io/fooo/bar", - AmbiguousName: "index.docker.io/fooo/bar", - Domain: "docker.io", - }, - { - RemoteName: "library/ubuntu", - FamiliarName: "ubuntu", - FullName: "docker.io/library/ubuntu", - AmbiguousName: "library/ubuntu", - Domain: "docker.io", - }, - { - RemoteName: "nonlibrary/ubuntu", - FamiliarName: "nonlibrary/ubuntu", - FullName: "docker.io/nonlibrary/ubuntu", - AmbiguousName: "", - Domain: "docker.io", - }, - { - RemoteName: "other/library", - FamiliarName: "other/library", - FullName: "docker.io/other/library", - AmbiguousName: "", - Domain: "docker.io", - }, - { - RemoteName: "private/moonbase", - FamiliarName: "127.0.0.1:8000/private/moonbase", - FullName: "127.0.0.1:8000/private/moonbase", - AmbiguousName: "", - Domain: "127.0.0.1:8000", - }, - { - RemoteName: "privatebase", - FamiliarName: "127.0.0.1:8000/privatebase", - FullName: "127.0.0.1:8000/privatebase", - AmbiguousName: "", - Domain: "127.0.0.1:8000", - }, - { - RemoteName: "private/moonbase", - FamiliarName: "example.com/private/moonbase", - FullName: "example.com/private/moonbase", - AmbiguousName: "", - Domain: "example.com", - }, - { - RemoteName: "privatebase", - FamiliarName: "example.com/privatebase", - FullName: "example.com/privatebase", - AmbiguousName: "", - Domain: "example.com", - }, - { - RemoteName: "private/moonbase", - FamiliarName: "example.com:8000/private/moonbase", - FullName: "example.com:8000/private/moonbase", - AmbiguousName: "", - Domain: "example.com:8000", - }, - { - RemoteName: "privatebasee", - FamiliarName: "example.com:8000/privatebasee", - FullName: 
"example.com:8000/privatebasee", - AmbiguousName: "", - Domain: "example.com:8000", - }, - { - RemoteName: "library/ubuntu-12.04-base", - FamiliarName: "ubuntu-12.04-base", - FullName: "docker.io/library/ubuntu-12.04-base", - AmbiguousName: "index.docker.io/library/ubuntu-12.04-base", - Domain: "docker.io", - }, - { - RemoteName: "library/foo", - FamiliarName: "foo", - FullName: "docker.io/library/foo", - AmbiguousName: "docker.io/foo", - Domain: "docker.io", - }, - { - RemoteName: "library/foo/bar", - FamiliarName: "library/foo/bar", - FullName: "docker.io/library/foo/bar", - AmbiguousName: "", - Domain: "docker.io", - }, - { - RemoteName: "store/foo/bar", - FamiliarName: "store/foo/bar", - FullName: "docker.io/store/foo/bar", - AmbiguousName: "", - Domain: "docker.io", - }, - } - - for _, tcase := range tcases { - refStrings := []string{tcase.FamiliarName, tcase.FullName} - if tcase.AmbiguousName != "" { - refStrings = append(refStrings, tcase.AmbiguousName) - } - - var refs []Named - for _, r := range refStrings { - named, err := ParseNormalizedNamed(r) - if err != nil { - t.Fatal(err) - } - refs = append(refs, named) - } - - for _, r := range refs { - if expected, actual := tcase.FamiliarName, FamiliarName(r); expected != actual { - t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual) - } - if expected, actual := tcase.FullName, r.String(); expected != actual { - t.Fatalf("Invalid canonical reference for %q. Expected %q, got %q", r, expected, actual) - } - if expected, actual := tcase.Domain, Domain(r); expected != actual { - t.Fatalf("Invalid domain for %q. Expected %q, got %q", r, expected, actual) - } - if expected, actual := tcase.RemoteName, Path(r); expected != actual { - t.Fatalf("Invalid remoteName for %q. 
Expected %q, got %q", r, expected, actual) - } - - } - } -} - -func TestParseReferenceWithTagAndDigest(t *testing.T) { - shortRef := "busybox:latest@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa" - ref, err := ParseNormalizedNamed(shortRef) - if err != nil { - t.Fatal(err) - } - if expected, actual := "docker.io/library/"+shortRef, ref.String(); actual != expected { - t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual) - } - - if _, isTagged := ref.(NamedTagged); !isTagged { - t.Fatalf("Reference from %q should support tag", ref) - } - if _, isCanonical := ref.(Canonical); !isCanonical { - t.Fatalf("Reference from %q should support digest", ref) - } - if expected, actual := shortRef, FamiliarString(ref); actual != expected { - t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual) - } -} - -func TestInvalidReferenceComponents(t *testing.T) { - if _, err := ParseNormalizedNamed("-foo"); err == nil { - t.Fatal("Expected WithName to detect invalid name") - } - ref, err := ParseNormalizedNamed("busybox") - if err != nil { - t.Fatal(err) - } - if _, err := WithTag(ref, "-foo"); err == nil { - t.Fatal("Expected WithName to detect invalid tag") - } - if _, err := WithDigest(ref, digest.Digest("foo")); err == nil { - t.Fatal("Expected WithDigest to detect invalid digest") - } -} - -func equalReference(r1, r2 Reference) bool { - switch v1 := r1.(type) { - case digestReference: - if v2, ok := r2.(digestReference); ok { - return v1 == v2 - } - case repository: - if v2, ok := r2.(repository); ok { - return v1 == v2 - } - case taggedReference: - if v2, ok := r2.(taggedReference); ok { - return v1 == v2 - } - case canonicalReference: - if v2, ok := r2.(canonicalReference); ok { - return v1 == v2 - } - case reference: - if v2, ok := r2.(reference); ok { - return v1 == v2 - } - } - return false -} - -func TestParseAnyReference(t *testing.T) { - tcases := []struct { - Reference string - Equivalent string - Expected Reference - Digests []digest.Digest - }{ - { - Reference: "redis", - Equivalent: "docker.io/library/redis", - }, - { - Reference: "redis:latest", - Equivalent: "docker.io/library/redis:latest", - }, - { - Reference: "docker.io/library/redis:latest", - Equivalent: "docker.io/library/redis:latest", - }, - { - Reference: "redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Equivalent: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Equivalent: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "dmcgowan/myapp", - Equivalent: "docker.io/dmcgowan/myapp", - }, - { - Reference: "dmcgowan/myapp:latest", - Equivalent: "docker.io/dmcgowan/myapp:latest", - }, - { - Reference: "docker.io/mcgowan/myapp:latest", - Equivalent: "docker.io/mcgowan/myapp:latest", - }, - { - Reference: "dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Equivalent: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Equivalent: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: 
"dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", - Equivalent: "docker.io/library/dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", - }, - { - Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", - Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Digests: []digest.Digest{ - digest.Digest("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - }, - }, - { - Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", - Equivalent: "docker.io/library/dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", - Digests: []digest.Digest{ - digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - }, - }, - { - Reference: "dbcc1c", - Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Digests: []digest.Digest{ - digest.Digest("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - }, - }, - { - Reference: "dbcc1", - Equivalent: "docker.io/library/dbcc1", - Digests: []digest.Digest{ - digest.Digest("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - }, - }, - { - Reference: "dbcc1c", - Equivalent: "docker.io/library/dbcc1c", - Digests: []digest.Digest{ - digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - }, - }, - } - - for _, tcase := range tcases { - var ref Reference - var err error - if len(tcase.Digests) == 0 { - ref, err = ParseAnyReference(tcase.Reference) - } else { - ds := digestset.NewSet() - for _, dgst := range tcase.Digests { - if err := ds.Add(dgst); err != nil { - t.Fatalf("Error adding digest %s: %v", dgst.String(), err) - } - } - ref, err = ParseAnyReferenceWithSet(tcase.Reference, ds) - } - if err != nil { - t.Fatalf("Error parsing reference %s: %v", tcase.Reference, err) - } - if ref.String() != tcase.Equivalent { - t.Fatalf("Unexpected string: %s, expected %s", ref.String(), tcase.Equivalent) - } - - expected := tcase.Expected - if expected == nil { - expected, err = Parse(tcase.Equivalent) - if err != nil { - t.Fatalf("Error parsing reference %s: %v", tcase.Equivalent, err) - } - } - if !equalReference(ref, expected) { - t.Errorf("Unexpected reference %#v, expected %#v", ref, expected) - } - } -} - -func TestNormalizedSplitHostname(t *testing.T) { - testcases := []struct { - input string - domain string - name string - }{ - { - input: 
"test.com/foo", - domain: "test.com", - name: "foo", - }, - { - input: "test_com/foo", - domain: "docker.io", - name: "test_com/foo", - }, - { - input: "docker/migrator", - domain: "docker.io", - name: "docker/migrator", - }, - { - input: "test.com:8080/foo", - domain: "test.com:8080", - name: "foo", - }, - { - input: "test-com:8080/foo", - domain: "test-com:8080", - name: "foo", - }, - { - input: "foo", - domain: "docker.io", - name: "library/foo", - }, - { - input: "xn--n3h.com/foo", - domain: "xn--n3h.com", - name: "foo", - }, - { - input: "xn--n3h.com:18080/foo", - domain: "xn--n3h.com:18080", - name: "foo", - }, - { - input: "docker.io/foo", - domain: "docker.io", - name: "library/foo", - }, - { - input: "docker.io/library/foo", - domain: "docker.io", - name: "library/foo", - }, - { - input: "docker.io/library/foo/bar", - domain: "docker.io", - name: "library/foo/bar", - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) - t.Fail() - } - - named, err := ParseNormalizedNamed(testcase.input) - if err != nil { - failf("error parsing name: %s", err) - } - domain, name := SplitHostname(named) - if domain != testcase.domain { - failf("unexpected domain: got %q, expected %q", domain, testcase.domain) - } - if name != testcase.name { - failf("unexpected name: got %q, expected %q", name, testcase.name) - } - } -} - -func TestMatchError(t *testing.T) { - named, err := ParseAnyReference("foo") - if err != nil { - t.Fatal(err) - } - _, err = FamiliarMatch("[-x]", named) - if err == nil { - t.Fatalf("expected an error, got nothing") - } -} - -func TestMatch(t *testing.T) { - matchCases := []struct { - reference string - pattern string - expected bool - }{ - { - reference: "foo", - pattern: "foo/**/ba[rz]", - expected: false, - }, - { - reference: "foo/any/bat", - pattern: "foo/**/ba[rz]", - expected: false, - }, - { - reference: "foo/a/bar", - pattern: "foo/**/ba[rz]", - expected: true, - }, - { - reference: "foo/b/baz", - pattern: "foo/**/ba[rz]", - expected: true, - }, - { - reference: "foo/c/baz:tag", - pattern: "foo/**/ba[rz]", - expected: true, - }, - { - reference: "foo/c/baz:tag", - pattern: "foo/*/baz:tag", - expected: true, - }, - { - reference: "foo/c/baz:tag", - pattern: "foo/c/baz:tag", - expected: true, - }, - { - reference: "example.com/foo/c/baz:tag", - pattern: "*/foo/c/baz", - expected: true, - }, - { - reference: "example.com/foo/c/baz:tag", - pattern: "example.com/foo/c/baz", - expected: true, - }, - } - for _, c := range matchCases { - named, err := ParseAnyReference(c.reference) - if err != nil { - t.Fatal(err) - } - actual, err := FamiliarMatch(c.pattern, named) - if err != nil { - t.Fatal(err) - } - if actual != c.expected { - t.Fatalf("expected %s match %s to be %v, was %v", c.reference, c.pattern, c.expected, actual) - } - } -} diff --git a/vendor/github.com/docker/distribution/reference/reference_test.go b/vendor/github.com/docker/distribution/reference/reference_test.go deleted file mode 100644 index 16b871f9..00000000 --- a/vendor/github.com/docker/distribution/reference/reference_test.go +++ /dev/null @@ -1,659 +0,0 @@ -package reference - -import ( - _ "crypto/sha256" - _ "crypto/sha512" - "encoding/json" - "strconv" - "strings" - "testing" - - "github.com/opencontainers/go-digest" -) - -func TestReferenceParse(t *testing.T) { - // referenceTestcases is a unified set of testcases for - // testing the parsing of references - referenceTestcases := []struct { - // 
input is the repository name or name component testcase - input string - // err is the error expected from Parse, or nil - err error - // repository is the string representation for the reference - repository string - // domain is the domain expected in the reference - domain string - // tag is the tag for the reference - tag string - // digest is the digest for the reference (enforces digest reference) - digest string - }{ - { - input: "test_com", - repository: "test_com", - }, - { - input: "test.com:tag", - repository: "test.com", - tag: "tag", - }, - { - input: "test.com:5000", - repository: "test.com", - tag: "5000", - }, - { - input: "test.com/repo:tag", - domain: "test.com", - repository: "test.com/repo", - tag: "tag", - }, - { - input: "test:5000/repo", - domain: "test:5000", - repository: "test:5000/repo", - }, - { - input: "test:5000/repo:tag", - domain: "test:5000", - repository: "test:5000/repo", - tag: "tag", - }, - { - input: "test:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - domain: "test:5000", - repository: "test:5000/repo", - digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - }, - { - input: "test:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - domain: "test:5000", - repository: "test:5000/repo", - tag: "tag", - digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - }, - { - input: "test:5000/repo", - domain: "test:5000", - repository: "test:5000/repo", - }, - { - input: "", - err: ErrNameEmpty, - }, - { - input: ":justtag", - err: ErrReferenceInvalidFormat, - }, - { - input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - err: ErrReferenceInvalidFormat, - }, - { - input: "repo@sha256:ffffffffffffffffffffffffffffffffff", - err: digest.ErrDigestInvalidLength, - }, - { - input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - err: digest.ErrDigestUnsupported, - }, - { - input: "Uppercase:tag", - err: ErrNameContainsUppercase, - }, - // FIXME "Uppercase" is incorrectly handled as a domain-name here, therefore passes. 
- // See https://github.com/docker/distribution/pull/1778, and https://github.com/docker/docker/pull/20175 - //{ - // input: "Uppercase/lowercase:tag", - // err: ErrNameContainsUppercase, - //}, - { - input: "test:5000/Uppercase/lowercase:tag", - err: ErrNameContainsUppercase, - }, - { - input: "lowercase:Uppercase", - repository: "lowercase", - tag: "Uppercase", - }, - { - input: strings.Repeat("a/", 128) + "a:tag", - err: ErrNameTooLong, - }, - { - input: strings.Repeat("a/", 127) + "a:tag-puts-this-over-max", - domain: "a", - repository: strings.Repeat("a/", 127) + "a", - tag: "tag-puts-this-over-max", - }, - { - input: "aa/asdf$$^/aa", - err: ErrReferenceInvalidFormat, - }, - { - input: "sub-dom1.foo.com/bar/baz/quux", - domain: "sub-dom1.foo.com", - repository: "sub-dom1.foo.com/bar/baz/quux", - }, - { - input: "sub-dom1.foo.com/bar/baz/quux:some-long-tag", - domain: "sub-dom1.foo.com", - repository: "sub-dom1.foo.com/bar/baz/quux", - tag: "some-long-tag", - }, - { - input: "b.gcr.io/test.example.com/my-app:test.example.com", - domain: "b.gcr.io", - repository: "b.gcr.io/test.example.com/my-app", - tag: "test.example.com", - }, - { - input: "xn--n3h.com/myimage:xn--n3h.com", // ☃.com in punycode - domain: "xn--n3h.com", - repository: "xn--n3h.com/myimage", - tag: "xn--n3h.com", - }, - { - input: "xn--7o8h.com/myimage:xn--7o8h.com@sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // 🐳.com in punycode - domain: "xn--7o8h.com", - repository: "xn--7o8h.com/myimage", - tag: "xn--7o8h.com", - digest: "sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - }, - { - input: "foo_bar.com:8080", - repository: "foo_bar.com", - tag: "8080", - }, - { - input: "foo/foo_bar.com:8080", - domain: "foo", - repository: "foo/foo_bar.com", - tag: "8080", - }, - } - for _, testcase := range referenceTestcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
- t.Fail() - } - - repo, err := Parse(testcase.input) - if testcase.err != nil { - if err == nil { - failf("missing expected error: %v", testcase.err) - } else if testcase.err != err { - failf("mismatched error: got %v, expected %v", err, testcase.err) - } - continue - } else if err != nil { - failf("unexpected parse error: %v", err) - continue - } - if repo.String() != testcase.input { - failf("mismatched repo: got %q, expected %q", repo.String(), testcase.input) - } - - if named, ok := repo.(Named); ok { - if named.Name() != testcase.repository { - failf("unexpected repository: got %q, expected %q", named.Name(), testcase.repository) - } - domain, _ := SplitHostname(named) - if domain != testcase.domain { - failf("unexpected domain: got %q, expected %q", domain, testcase.domain) - } - } else if testcase.repository != "" || testcase.domain != "" { - failf("expected named type, got %T", repo) - } - - tagged, ok := repo.(Tagged) - if testcase.tag != "" { - if ok { - if tagged.Tag() != testcase.tag { - failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag) - } - } else { - failf("expected tagged type, got %T", repo) - } - } else if ok { - failf("unexpected tagged type") - } - - digested, ok := repo.(Digested) - if testcase.digest != "" { - if ok { - if digested.Digest().String() != testcase.digest { - failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest) - } - } else { - failf("expected digested type, got %T", repo) - } - } else if ok { - failf("unexpected digested type") - } - - } -} - -// TestWithNameFailure tests cases where WithName should fail. Cases where it -// should succeed are covered by TestSplitHostname, below. -func TestWithNameFailure(t *testing.T) { - testcases := []struct { - input string - err error - }{ - { - input: "", - err: ErrNameEmpty, - }, - { - input: ":justtag", - err: ErrReferenceInvalidFormat, - }, - { - input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - err: ErrReferenceInvalidFormat, - }, - { - input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - err: ErrReferenceInvalidFormat, - }, - { - input: strings.Repeat("a/", 128) + "a:tag", - err: ErrNameTooLong, - }, - { - input: "aa/asdf$$^/aa", - err: ErrReferenceInvalidFormat, - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) - t.Fail() - } - - _, err := WithName(testcase.input) - if err == nil { - failf("no error parsing name. expected: %s", testcase.err) - } - } -} - -func TestSplitHostname(t *testing.T) { - testcases := []struct { - input string - domain string - name string - }{ - { - input: "test.com/foo", - domain: "test.com", - name: "foo", - }, - { - input: "test_com/foo", - domain: "", - name: "test_com/foo", - }, - { - input: "test:8080/foo", - domain: "test:8080", - name: "foo", - }, - { - input: "test.com:8080/foo", - domain: "test.com:8080", - name: "foo", - }, - { - input: "test-com:8080/foo", - domain: "test-com:8080", - name: "foo", - }, - { - input: "xn--n3h.com:18080/foo", - domain: "xn--n3h.com:18080", - name: "foo", - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
- t.Fail() - } - - named, err := WithName(testcase.input) - if err != nil { - failf("error parsing name: %s", err) - } - domain, name := SplitHostname(named) - if domain != testcase.domain { - failf("unexpected domain: got %q, expected %q", domain, testcase.domain) - } - if name != testcase.name { - failf("unexpected name: got %q, expected %q", name, testcase.name) - } - } -} - -type serializationType struct { - Description string - Field Field -} - -func TestSerialization(t *testing.T) { - testcases := []struct { - description string - input string - name string - tag string - digest string - err error - }{ - { - description: "empty value", - err: ErrNameEmpty, - }, - { - description: "just a name", - input: "example.com:8000/named", - name: "example.com:8000/named", - }, - { - description: "name with a tag", - input: "example.com:8000/named:tagged", - name: "example.com:8000/named", - tag: "tagged", - }, - { - description: "name with digest", - input: "other.com/named@sha256:1234567890098765432112345667890098765432112345667890098765432112", - name: "other.com/named", - digest: "sha256:1234567890098765432112345667890098765432112345667890098765432112", - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) - t.Fail() - } - - m := map[string]string{ - "Description": testcase.description, - "Field": testcase.input, - } - b, err := json.Marshal(m) - if err != nil { - failf("error marshalling: %v", err) - } - t := serializationType{} - - if err := json.Unmarshal(b, &t); err != nil { - if testcase.err == nil { - failf("error unmarshalling: %v", err) - } - if err != testcase.err { - failf("wrong error, expected %v, got %v", testcase.err, err) - } - - continue - } else if testcase.err != nil { - failf("expected error unmarshalling: %v", testcase.err) - } - - if t.Description != testcase.description { - failf("wrong description, expected %q, got %q", testcase.description, t.Description) - } - - ref := t.Field.Reference() - - if named, ok := ref.(Named); ok { - if named.Name() != testcase.name { - failf("unexpected repository: got %q, expected %q", named.Name(), testcase.name) - } - } else if testcase.name != "" { - failf("expected named type, got %T", ref) - } - - tagged, ok := ref.(Tagged) - if testcase.tag != "" { - if ok { - if tagged.Tag() != testcase.tag { - failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag) - } - } else { - failf("expected tagged type, got %T", ref) - } - } else if ok { - failf("unexpected tagged type") - } - - digested, ok := ref.(Digested) - if testcase.digest != "" { - if ok { - if digested.Digest().String() != testcase.digest { - failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest) - } - } else { - failf("expected digested type, got %T", ref) - } - } else if ok { - failf("unexpected digested type") - } - - t = serializationType{ - Description: testcase.description, - Field: AsField(ref), - } - - b2, err := json.Marshal(t) - if err != nil { - failf("error marshing serialization type: %v", err) - } - - if string(b) != string(b2) { - failf("unexpected serialized value: expected %q, got %q", string(b), string(b2)) - } - - // Ensure t.Field is not implementing "Reference" directly, getting - // around the Reference type system - var fieldInterface interface{} = t.Field - if _, ok := fieldInterface.(Reference); ok { - failf("field should not implement Reference interface") - } - - } -} - -func TestWithTag(t 
*testing.T) { - testcases := []struct { - name string - digest digest.Digest - tag string - combined string - }{ - { - name: "test.com/foo", - tag: "tag", - combined: "test.com/foo:tag", - }, - { - name: "foo", - tag: "tag2", - combined: "foo:tag2", - }, - { - name: "test.com:8000/foo", - tag: "tag4", - combined: "test.com:8000/foo:tag4", - }, - { - name: "test.com:8000/foo", - tag: "TAG5", - combined: "test.com:8000/foo:TAG5", - }, - { - name: "test.com:8000/foo", - digest: "sha256:1234567890098765432112345667890098765", - tag: "TAG5", - combined: "test.com:8000/foo:TAG5@sha256:1234567890098765432112345667890098765", - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.name)+": "+format, v...) - t.Fail() - } - - named, err := WithName(testcase.name) - if err != nil { - failf("error parsing name: %s", err) - } - if testcase.digest != "" { - canonical, err := WithDigest(named, testcase.digest) - if err != nil { - failf("error adding digest") - } - named = canonical - } - - tagged, err := WithTag(named, testcase.tag) - if err != nil { - failf("WithTag failed: %s", err) - } - if tagged.String() != testcase.combined { - failf("unexpected: got %q, expected %q", tagged.String(), testcase.combined) - } - } -} - -func TestWithDigest(t *testing.T) { - testcases := []struct { - name string - digest digest.Digest - tag string - combined string - }{ - { - name: "test.com/foo", - digest: "sha256:1234567890098765432112345667890098765", - combined: "test.com/foo@sha256:1234567890098765432112345667890098765", - }, - { - name: "foo", - digest: "sha256:1234567890098765432112345667890098765", - combined: "foo@sha256:1234567890098765432112345667890098765", - }, - { - name: "test.com:8000/foo", - digest: "sha256:1234567890098765432112345667890098765", - combined: "test.com:8000/foo@sha256:1234567890098765432112345667890098765", - }, - { - name: "test.com:8000/foo", - digest: "sha256:1234567890098765432112345667890098765", - tag: "latest", - combined: "test.com:8000/foo:latest@sha256:1234567890098765432112345667890098765", - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.name)+": "+format, v...) 
- t.Fail() - } - - named, err := WithName(testcase.name) - if err != nil { - failf("error parsing name: %s", err) - } - if testcase.tag != "" { - tagged, err := WithTag(named, testcase.tag) - if err != nil { - failf("error adding tag") - } - named = tagged - } - digested, err := WithDigest(named, testcase.digest) - if err != nil { - failf("WithDigest failed: %s", err) - } - if digested.String() != testcase.combined { - failf("unexpected: got %q, expected %q", digested.String(), testcase.combined) - } - } -} - -func TestParseNamed(t *testing.T) { - testcases := []struct { - input string - domain string - name string - err error - }{ - { - input: "test.com/foo", - domain: "test.com", - name: "foo", - }, - { - input: "test:8080/foo", - domain: "test:8080", - name: "foo", - }, - { - input: "test_com/foo", - err: ErrNameNotCanonical, - }, - { - input: "test.com", - err: ErrNameNotCanonical, - }, - { - input: "foo", - err: ErrNameNotCanonical, - }, - { - input: "library/foo", - err: ErrNameNotCanonical, - }, - { - input: "docker.io/library/foo", - domain: "docker.io", - name: "library/foo", - }, - // Ambiguous case, parser will add "library/" to foo - { - input: "docker.io/foo", - err: ErrNameNotCanonical, - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) - t.Fail() - } - - named, err := ParseNamed(testcase.input) - if err != nil && testcase.err == nil { - failf("error parsing name: %s", err) - continue - } else if err == nil && testcase.err != nil { - failf("parsing succeded: expected error %v", testcase.err) - continue - } else if err != testcase.err { - failf("unexpected error %v, expected %v", err, testcase.err) - continue - } else if err != nil { - continue - } - - domain, name := SplitHostname(named) - if domain != testcase.domain { - failf("unexpected domain: got %q, expected %q", domain, testcase.domain) - } - if name != testcase.name { - failf("unexpected name: got %q, expected %q", name, testcase.name) - } - } -} diff --git a/vendor/github.com/docker/distribution/reference/regexp_test.go b/vendor/github.com/docker/distribution/reference/regexp_test.go deleted file mode 100644 index 09bc8192..00000000 --- a/vendor/github.com/docker/distribution/reference/regexp_test.go +++ /dev/null @@ -1,553 +0,0 @@ -package reference - -import ( - "regexp" - "strings" - "testing" -) - -type regexpMatch struct { - input string - match bool - subs []string -} - -func checkRegexp(t *testing.T, r *regexp.Regexp, m regexpMatch) { - matches := r.FindStringSubmatch(m.input) - if m.match && matches != nil { - if len(matches) != (r.NumSubexp()+1) || matches[0] != m.input { - t.Fatalf("Bad match result %#v for %q", matches, m.input) - } - if len(matches) < (len(m.subs) + 1) { - t.Errorf("Expected %d sub matches, only have %d for %q", len(m.subs), len(matches)-1, m.input) - } - for i := range m.subs { - if m.subs[i] != matches[i+1] { - t.Errorf("Unexpected submatch %d: %q, expected %q for %q", i+1, matches[i+1], m.subs[i], m.input) - } - } - } else if m.match { - t.Errorf("Expected match for %q", m.input) - } else if matches != nil { - t.Errorf("Unexpected match for %q", m.input) - } -} - -func TestDomainRegexp(t *testing.T) { - hostcases := []regexpMatch{ - { - input: "test.com", - match: true, - }, - { - input: "test.com:10304", - match: true, - }, - { - input: "test.com:http", - match: false, - }, - { - input: "localhost", - match: true, - }, - { - input: "localhost:8080", - match: true, - }, - { - 
input: "a", - match: true, - }, - { - input: "a.b", - match: true, - }, - { - input: "ab.cd.com", - match: true, - }, - { - input: "a-b.com", - match: true, - }, - { - input: "-ab.com", - match: false, - }, - { - input: "ab-.com", - match: false, - }, - { - input: "ab.c-om", - match: true, - }, - { - input: "ab.-com", - match: false, - }, - { - input: "ab.com-", - match: false, - }, - { - input: "0101.com", - match: true, // TODO(dmcgowan): valid if this should be allowed - }, - { - input: "001a.com", - match: true, - }, - { - input: "b.gbc.io:443", - match: true, - }, - { - input: "b.gbc.io", - match: true, - }, - { - input: "xn--n3h.com", // ☃.com in punycode - match: true, - }, - { - input: "Asdf.com", // uppercase character - match: true, - }, - } - r := regexp.MustCompile(`^` + DomainRegexp.String() + `$`) - for i := range hostcases { - checkRegexp(t, r, hostcases[i]) - } -} - -func TestFullNameRegexp(t *testing.T) { - if anchoredNameRegexp.NumSubexp() != 2 { - t.Fatalf("anchored name regexp should have two submatches: %v, %v != 2", - anchoredNameRegexp, anchoredNameRegexp.NumSubexp()) - } - - testcases := []regexpMatch{ - { - input: "", - match: false, - }, - { - input: "short", - match: true, - subs: []string{"", "short"}, - }, - { - input: "simple/name", - match: true, - subs: []string{"simple", "name"}, - }, - { - input: "library/ubuntu", - match: true, - subs: []string{"library", "ubuntu"}, - }, - { - input: "docker/stevvooe/app", - match: true, - subs: []string{"docker", "stevvooe/app"}, - }, - { - input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb", - match: true, - subs: []string{"aa", "aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb"}, - }, - { - input: "aa/aa/bb/bb/bb", - match: true, - subs: []string{"aa", "aa/bb/bb/bb"}, - }, - { - input: "a/a/a/a", - match: true, - subs: []string{"a", "a/a/a"}, - }, - { - input: "a/a/a/a/", - match: false, - }, - { - input: "a//a/a", - match: false, - }, - { - input: "a", - match: true, - subs: []string{"", "a"}, - }, - { - input: "a/aa", - match: true, - subs: []string{"a", "aa"}, - }, - { - input: "a/aa/a", - match: true, - subs: []string{"a", "aa/a"}, - }, - { - input: "foo.com", - match: true, - subs: []string{"", "foo.com"}, - }, - { - input: "foo.com/", - match: false, - }, - { - input: "foo.com:8080/bar", - match: true, - subs: []string{"foo.com:8080", "bar"}, - }, - { - input: "foo.com:http/bar", - match: false, - }, - { - input: "foo.com/bar", - match: true, - subs: []string{"foo.com", "bar"}, - }, - { - input: "foo.com/bar/baz", - match: true, - subs: []string{"foo.com", "bar/baz"}, - }, - { - input: "localhost:8080/bar", - match: true, - subs: []string{"localhost:8080", "bar"}, - }, - { - input: "sub-dom1.foo.com/bar/baz/quux", - match: true, - subs: []string{"sub-dom1.foo.com", "bar/baz/quux"}, - }, - { - input: "blog.foo.com/bar/baz", - match: true, - subs: []string{"blog.foo.com", "bar/baz"}, - }, - { - input: "a^a", - match: false, - }, - { - input: "aa/asdf$$^/aa", - match: false, - }, - { - input: "asdf$$^/aa", - match: false, - }, - { - input: "aa-a/a", - match: true, - subs: []string{"aa-a", "a"}, - }, - { - input: strings.Repeat("a/", 128) + "a", - match: true, - subs: []string{"a", strings.Repeat("a/", 127) + "a"}, - }, - { - input: "a-/a/a/a", - match: false, - }, - { - input: "foo.com/a-/a/a", - match: false, - }, - { - input: "-foo/bar", - match: false, - }, - { - input: "foo/bar-", - match: false, - }, - { - input: "foo-/bar", - match: false, - }, - { - input: "foo/-bar", - match: false, - }, - { - input: "_foo/bar", 
- match: false, - }, - { - input: "foo_bar", - match: true, - subs: []string{"", "foo_bar"}, - }, - { - input: "foo_bar.com", - match: true, - subs: []string{"", "foo_bar.com"}, - }, - { - input: "foo_bar.com:8080", - match: false, - }, - { - input: "foo_bar.com:8080/app", - match: false, - }, - { - input: "foo.com/foo_bar", - match: true, - subs: []string{"foo.com", "foo_bar"}, - }, - { - input: "____/____", - match: false, - }, - { - input: "_docker/_docker", - match: false, - }, - { - input: "docker_/docker_", - match: false, - }, - { - input: "b.gcr.io/test.example.com/my-app", - match: true, - subs: []string{"b.gcr.io", "test.example.com/my-app"}, - }, - { - input: "xn--n3h.com/myimage", // ☃.com in punycode - match: true, - subs: []string{"xn--n3h.com", "myimage"}, - }, - { - input: "xn--7o8h.com/myimage", // 🐳.com in punycode - match: true, - subs: []string{"xn--7o8h.com", "myimage"}, - }, - { - input: "example.com/xn--7o8h.com/myimage", // 🐳.com in punycode - match: true, - subs: []string{"example.com", "xn--7o8h.com/myimage"}, - }, - { - input: "example.com/some_separator__underscore/myimage", - match: true, - subs: []string{"example.com", "some_separator__underscore/myimage"}, - }, - { - input: "example.com/__underscore/myimage", - match: false, - }, - { - input: "example.com/..dots/myimage", - match: false, - }, - { - input: "example.com/.dots/myimage", - match: false, - }, - { - input: "example.com/nodouble..dots/myimage", - match: false, - }, - { - input: "example.com/nodouble..dots/myimage", - match: false, - }, - { - input: "docker./docker", - match: false, - }, - { - input: ".docker/docker", - match: false, - }, - { - input: "docker-/docker", - match: false, - }, - { - input: "-docker/docker", - match: false, - }, - { - input: "do..cker/docker", - match: false, - }, - { - input: "do__cker:8080/docker", - match: false, - }, - { - input: "do__cker/docker", - match: true, - subs: []string{"", "do__cker/docker"}, - }, - { - input: "b.gcr.io/test.example.com/my-app", - match: true, - subs: []string{"b.gcr.io", "test.example.com/my-app"}, - }, - { - input: "registry.io/foo/project--id.module--name.ver---sion--name", - match: true, - subs: []string{"registry.io", "foo/project--id.module--name.ver---sion--name"}, - }, - { - input: "Asdf.com/foo/bar", // uppercase character in hostname - match: true, - }, - { - input: "Foo/FarB", // uppercase characters in remote name - match: false, - }, - } - for i := range testcases { - checkRegexp(t, anchoredNameRegexp, testcases[i]) - } -} - -func TestReferenceRegexp(t *testing.T) { - if ReferenceRegexp.NumSubexp() != 3 { - t.Fatalf("anchored name regexp should have three submatches: %v, %v != 3", - ReferenceRegexp, ReferenceRegexp.NumSubexp()) - } - - testcases := []regexpMatch{ - { - input: "registry.com:8080/myapp:tag", - match: true, - subs: []string{"registry.com:8080/myapp", "tag", ""}, - }, - { - input: "registry.com:8080/myapp@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: true, - subs: []string{"registry.com:8080/myapp", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, - }, - { - input: "registry.com:8080/myapp:tag2@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: true, - subs: []string{"registry.com:8080/myapp", "tag2", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, - }, - { - input: "registry.com:8080/myapp@sha256:badbadbadbad", - match: false, - }, - { - input: "registry.com:8080/myapp:invalid~tag", - 
match: false, - }, - { - input: "bad_hostname.com:8080/myapp:tag", - match: false, - }, - { - input:// localhost treated as name, missing tag with 8080 as tag - "localhost:8080@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: true, - subs: []string{"localhost", "8080", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, - }, - { - input: "localhost:8080/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: true, - subs: []string{"localhost:8080/name", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, - }, - { - input: "localhost:http/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: false, - }, - { - // localhost will be treated as an image name without a host - input: "localhost@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: true, - subs: []string{"localhost", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, - }, - { - input: "registry.com:8080/myapp@bad", - match: false, - }, - { - input: "registry.com:8080/myapp@2bad", - match: false, // TODO(dmcgowan): Support this as valid - }, - } - - for i := range testcases { - checkRegexp(t, ReferenceRegexp, testcases[i]) - } - -} - -func TestIdentifierRegexp(t *testing.T) { - fullCases := []regexpMatch{ - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", - match: true, - }, - { - input: "7EC43B381E5AEFE6E04EFB0B3F0693FF2A4A50652D64AEC573905F2DB5889A1C", - match: false, - }, - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf", - match: false, - }, - { - input: "sha256:da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", - match: false, - }, - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf98218482", - match: false, - }, - } - - shortCases := []regexpMatch{ - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", - match: true, - }, - { - input: "7EC43B381E5AEFE6E04EFB0B3F0693FF2A4A50652D64AEC573905F2DB5889A1C", - match: false, - }, - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf", - match: true, - }, - { - input: "sha256:da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", - match: false, - }, - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf98218482", - match: false, - }, - { - input: "da304", - match: false, - }, - { - input: "da304e", - match: true, - }, - } - - for i := range fullCases { - checkRegexp(t, anchoredIdentifierRegexp, fullCases[i]) - } - - for i := range shortCases { - checkRegexp(t, anchoredShortIdentifierRegexp, shortCases[i]) - } -} diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go deleted file mode 100644 index a3a80ab8..00000000 --- a/vendor/github.com/docker/distribution/registry.go +++ /dev/null @@ -1,113 +0,0 @@ -package distribution - -import ( - "context" - - "github.com/docker/distribution/reference" -) - -// Scope defines the set of items that match a namespace. -type Scope interface { - // Contains returns true if the name belongs to the namespace. - Contains(name string) bool -} - -type fullScope struct{} - -func (f fullScope) Contains(string) bool { - return true -} - -// GlobalScope represents the full namespace scope which contains -// all other scopes. 
-var GlobalScope = Scope(fullScope{}) - -// Namespace represents a collection of repositories, addressable by name. -// Generally, a namespace is backed by a set of one or more services, -// providing facilities such as registry access, trust, and indexing. -type Namespace interface { - // Scope describes the names that can be used with this Namespace. The - // global namespace will have a scope that matches all names. The scope - // effectively provides an identity for the namespace. - Scope() Scope - - // Repository should return a reference to the named repository. The - // registry may or may not have the repository but should always return a - // reference. - Repository(ctx context.Context, name reference.Named) (Repository, error) - - // Repositories fills 'repos' with a lexicographically sorted catalog of repositories - // up to the size of 'repos' and returns the value 'n' for the number of entries - // which were filled. 'last' contains an offset in the catalog, and 'err' will be - // set to io.EOF if there are no more entries to obtain. - Repositories(ctx context.Context, repos []string, last string) (n int, err error) - - // Blobs returns a blob enumerator to access all blobs - Blobs() BlobEnumerator - - // BlobStatter returns a BlobStatter to control - BlobStatter() BlobStatter -} - -// RepositoryEnumerator describes an operation to enumerate repositories -type RepositoryEnumerator interface { - Enumerate(ctx context.Context, ingester func(string) error) error -} - -// ManifestServiceOption is a function argument for Manifest Service methods -type ManifestServiceOption interface { - Apply(ManifestService) error -} - -// WithTag allows a tag to be passed into Put -func WithTag(tag string) ManifestServiceOption { - return WithTagOption{tag} -} - -// WithTagOption holds a tag -type WithTagOption struct{ Tag string } - -// Apply conforms to the ManifestServiceOption interface -func (o WithTagOption) Apply(m ManifestService) error { - // no implementation - return nil -} - -// WithManifestMediaTypes lists the media types the client wishes -// the server to provide. -func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption { - return WithManifestMediaTypesOption{mediaTypes} -} - -// WithManifestMediaTypesOption holds a list of accepted media types -type WithManifestMediaTypesOption struct{ MediaTypes []string } - -// Apply conforms to the ManifestServiceOption interface -func (o WithManifestMediaTypesOption) Apply(m ManifestService) error { - // no implementation - return nil -} - -// Repository is a named collection of manifests and layers. -type Repository interface { - // Named returns the name of the repository. - Named() reference.Named - - // Manifests returns a reference to this repository's manifest service. - // with the supplied options applied. - Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error) - - // Blobs returns a reference to this repository's blob service. - Blobs(ctx context.Context) BlobStore - - // TODO(stevvooe): The above BlobStore return can probably be relaxed to - // be a BlobService for use with clients. This will allow such - // implementations to avoid implementing ServeBlob. - - // Tags returns a reference to this repositories tag service - Tags(ctx context.Context) TagService -} - -// TODO(stevvooe): Must add close methods to all these. May want to change the -// way instances are created to better reflect internal dependency -// relationships. 
diff --git a/vendor/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go deleted file mode 100644 index f22df2b8..00000000 --- a/vendor/github.com/docker/distribution/tags.go +++ /dev/null @@ -1,27 +0,0 @@ -package distribution - -import ( - "context" -) - -// TagService provides access to information about tagged objects. -type TagService interface { - // Get retrieves the descriptor identified by the tag. Some - // implementations may differentiate between "trusted" tags and - // "untrusted" tags. If a tag is "untrusted", the mapping will be returned - // as an ErrTagUntrusted error, with the target descriptor. - Get(ctx context.Context, tag string) (Descriptor, error) - - // Tag associates the tag with the provided descriptor, updating the - // current association, if needed. - Tag(ctx context.Context, tag string, desc Descriptor) error - - // Untag removes the given tag association - Untag(ctx context.Context, tag string) error - - // All returns the set of tags managed by this tag service - All(ctx context.Context) ([]string, error) - - // Lookup returns the set of tags referencing the given digest. - Lookup(ctx context.Context, digest Descriptor) ([]string, error) -} diff --git a/vendor/github.com/docker/distribution/vendor.conf b/vendor/github.com/docker/distribution/vendor.conf deleted file mode 100644 index d0ebadf8..00000000 --- a/vendor/github.com/docker/distribution/vendor.conf +++ /dev/null @@ -1,49 +0,0 @@ -github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e -github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356 -github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 -github.com/aws/aws-sdk-go 5bcc0a238d880469f949fc7cd24e35f32ab80cbd -github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a -github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 -github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 -github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 -github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 -github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 -github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 -github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab -github.com/docker/goamz f0a21f5b2e12f83a505ecf79b633bb2035cf6f85 -github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 -github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 -github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c -github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 -github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b -github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 -github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d -github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c -github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 -github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef -github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6 -github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564 -github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c -github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 -github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd 
-github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 -github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 -github.com/stevvooe/resumable 2aaf90b2ceea5072cb503ef2a620b08ff3119870 -github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 -github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e -github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 -github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 -golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b -golang.org/x/net 4876518f9e71663000c348837735820161a42df7 -golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf -golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb -google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54 -google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 -google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 -google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 -gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 -gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b -gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420 -rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git -github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb diff --git a/vendor/github.com/docker/docker/.DEREK.yml b/vendor/github.com/docker/docker/.DEREK.yml deleted file mode 100644 index 3fd67891..00000000 --- a/vendor/github.com/docker/docker/.DEREK.yml +++ /dev/null @@ -1,17 +0,0 @@ -curators: - - aboch - - alexellis - - andrewhsu - - anonymuse - - chanwit - - ehazlett - - fntlnz - - gianarb - - mgoelzer - - programmerq - - rheinwein - - ripcurld0 - - thajeztah - -features: - - comments diff --git a/vendor/github.com/docker/docker/.dockerignore b/vendor/github.com/docker/docker/.dockerignore deleted file mode 100644 index 4a56f2e0..00000000 --- a/vendor/github.com/docker/docker/.dockerignore +++ /dev/null @@ -1,7 +0,0 @@ -bundles -.gopath -vendor/pkg -.go-pkg-cache -.git -hack/integration-cli-on-swarm/integration-cli-on-swarm - diff --git a/vendor/github.com/docker/docker/.gitignore b/vendor/github.com/docker/docker/.gitignore deleted file mode 100644 index 392bf963..00000000 --- a/vendor/github.com/docker/docker/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Docker project generated files to ignore -# if you want to ignore files created by your editor/tools, -# please consider a global .gitignore https://help.github.com/articles/ignoring-files -*.exe -*.exe~ -*.orig -test.main -.*.swp -.DS_Store -# a .bashrc may be added to customize the build environment -.bashrc -.editorconfig -.gopath/ -.go-pkg-cache/ -autogen/ -bundles/ -cmd/dockerd/dockerd -contrib/builder/rpm/*/changelog -dockerversion/version_autogen.go -dockerversion/version_autogen_unix.go -vendor/pkg/ -hack/integration-cli-on-swarm/integration-cli-on-swarm -coverage.txt -profile.out diff --git a/vendor/github.com/docker/docker/.mailmap b/vendor/github.com/docker/docker/.mailmap deleted file mode 100644 index 828ea784..00000000 --- a/vendor/github.com/docker/docker/.mailmap +++ /dev/null @@ -1,479 +0,0 @@ -# Generate AUTHORS: hack/generate-authors.sh - -# Tip for finding duplicates (besides scanning the output of AUTHORS for name -# duplicates that aren't also email duplicates): scan the output of: -# git log --format='%aE - %aN' | sort -uf -# -# For explanation on this file format: man git-shortlog - -<21551195@zju.edu.cn> - -Aaron 
L. Xu -Abhinandan Prativadi -Adrien Gallouët -Ahmed Kamal -Ahmet Alp Balkan -AJ Bowen -AJ Bowen -Akihiro Matsushima -Akihiro Suda -Aleksa Sarai -Aleksa Sarai -Aleksa Sarai -Aleksandrs Fadins -Alessandro Boch -Alex Chen -Alex Ellis -Alexander Larsson -Alexander Morozov -Alexander Morozov -Alexandre Beslic -Alicia Lauerman -Allen Sun -Allen Sun -Andrew Weiss -Andrew Weiss -André Martins -Andy Rothfusz -Andy Smith -Ankush Agarwal -Antonio Murdaca -Antonio Murdaca -Antonio Murdaca -Antonio Murdaca -Antonio Murdaca -Anuj Bahuguna -Anuj Bahuguna -Anusha Ragunathan -Arnaud Porterie -Arnaud Porterie -Arthur Gautier -Avi Miller -Ben Bonnefoy -Ben Golub -Ben Toews -Benoit Chesneau -Bhiraj Butala -Bhumika Bayani -Bilal Amarni -Bill Wang -Bin Liu -Bin Liu -Bingshen Wang -Boaz Shuster -Brandon Philips -Brandon Philips -Brent Salisbury -Brian Goff -Brian Goff -Brian Goff -Chander Govindarajan -Chao Wang -Charles Hooper -Chen Chao -Chen Chuanliang -Chen Mingjie -Chen Qiu -Chen Qiu <21321229@zju.edu.cn> -Chris Dias -Chris McKinnel -Christopher Biscardi -Christopher Latham -Chun Chen -Corbin Coleman -Cristian Staretu -Cristian Staretu -Cristian Staretu -CUI Wei cuiwei13 -Daehyeok Mun -Daehyeok Mun -Daehyeok Mun -Dan Feldman -Daniel Dao -Daniel Dao -Daniel Garcia -Daniel Gasienica -Daniel Goosen -Daniel Grunwell -Daniel J Walsh -Daniel Mizyrycki -Daniel Mizyrycki -Daniel Mizyrycki -Daniel Nephin -Daniel Norberg -Daniel Watkins -Danny Yates -Darren Shepherd -Dattatraya Kumbhar -Dave Goodchild -Dave Henderson -Dave Tucker -David M. Karr -David Sheets -David Sissitka -David Williamson -Deshi Xiao -Deshi Xiao -Diego Siqueira -Diogo Monica -Dominik Honnef -Doug Davis -Doug Tangren -Elan Ruusamäe -Elan Ruusamäe -Eric G. Noriega -Eric Hanchrow -Eric Rosenberg -Erica Windisch -Erica Windisch -Erik Hollensbe -Erwin van der Koogh -Euan Kemp -Eugen Krizo -Evan Hazlett -Evelyn Xu -Evgeny Shmarnev -Faiz Khan -Felix Hupfeld -Felix Ruess -Feng Yan -Fengtu Wang -Francisco Carriedo -Frank Rosquin -Frederick F. Kautz IV -Gabriel Nicolas Avellaneda -Gaetan de Villele -Gang Qiao <1373319223@qq.com> -George Kontridze -Gerwim Feiken -Giampaolo Mancini -Gopikannan Venugopalsamy -Gou Rao -Greg Stephens -Guillaume J. Charmes -Guillaume J. Charmes -Guillaume J. Charmes -Guillaume J. Charmes -Guillaume J. Charmes -Gurjeet Singh -Gustav Sinder -Günther Jungbluth -Hakan Özler -Hao Shu Wei -Hao Shu Wei -Harald Albers -Harold Cooper -Harry Zhang -Harry Zhang -Harry Zhang -Harry Zhang -Harshal Patil -Helen Xie -Hollie Teal -Hollie Teal -Hollie Teal -Hu Keping -Huu Nguyen -Hyzhou Zhy -Hyzhou Zhy <1187766782@qq.com> -Ilya Khlopotov -Ivan Markin -Jack Laxson -Jacob Atzen -Jacob Tomlinson -Jaivish Kothari -Jamie Hannaford -Jean-Baptiste Barth -Jean-Baptiste Dalido -Jean-Tiare Le Bigot -Jeff Anderson -Jeff Nickoloff -Jeroen Franse -Jessica Frazelle -Jessica Frazelle -Jessica Frazelle -Jessica Frazelle -Jessica Frazelle -Jessica Frazelle -Jessica Frazelle -Jessica Frazelle -Jim Galasyn -Jiuyue Ma -Joey Geiger -Joffrey F -Joffrey F -Joffrey F -Johan Euphrosine -John Harris -John Howard (VM) -John Howard (VM) -John Howard (VM) -John Howard (VM) -John Howard (VM) -John Stephens -Jon Surrell -Jordan Arentsen -Jordan Jennings -Jorit Kleine-Möllhoff -Jose Diaz-Gonzalez -Josh Bonczkowski -Josh Eveleth -Josh Hawn -Josh Horwitz -Josh Soref -Josh Wilson -Joyce Jang -Julien Bordellier -Julien Bordellier -Justin Cormack -Justin Cormack -Justin Cormack -Justin Simonelis -Jérôme Petazzoni -Jérôme Petazzoni -Jérôme Petazzoni -K. 
Heller -Kai Qiang Wu (Kennan) -Kai Qiang Wu (Kennan) -Kamil Domański -Kamjar Gerami -Ken Cochrane -Ken Herner -Kenfe-Mickaël Laventure -Kevin Feyrer -Kevin Kern -Kevin Meredith -Kir Kolyshkin -Kir Kolyshkin -Kir Kolyshkin -Konrad Kleine -Konstantin Gribov -Konstantin Pelykh -Kotaro Yoshimatsu -Kunal Kushwaha -Lajos Papp -Lei Jitang -Lei Jitang -Liang Mingqiang -Liang-Chi Hsieh -Liao Qingwei -Linus Heckemann -Linus Heckemann -Lokesh Mandvekar -Lorenzo Fontana -Louis Opter -Louis Opter -Luca Favatella -Luke Marsden -Lyn -Lynda O'Leary -Lynda O'Leary -Ma Müller -Madhan Raj Mookkandy -Madhu Venugopal -Mageee <21521230.zju.edu.cn> -Mansi Nahar -Mansi Nahar -Marc Abramowitz -Marcelo Horacio Fortino -Marcus Linke -Marianna Tessel -Mark Oates -Markan Patel -Markus Kortlang -Martin Redmond -Martin Redmond -Mary Anthony -Mary Anthony -Mary Anthony moxiegirl -Matt Bentley -Matt Schurenko -Matt Williams -Matt Williams -Matthew Heon -Matthew Mosesohn -Matthew Mueller -Matthias Kühnle -Mauricio Garavaglia -Michael Crosby -Michael Crosby -Michael Crosby -Michael Hudson-Doyle -Michael Huettermann -Michael Käufl -Michael Spetsiotis -Michal Minář -Miguel Angel Alvarez Cabrerizo <30386061+doncicuto@users.noreply.github.com> -Miguel Angel Fernández -Mihai Borobocea -Mike Casas -Mike Goelzer -Milind Chawre -Misty Stanley-Jones -Mohit Soni -Moorthy RS -Moysés Borges -Moysés Borges -Nace Oroz -Nathan LeClaire -Nathan LeClaire -Neil Horman -Nick Russo -Nicolas Borboën -Nigel Poulton -Nik Nyby -Nolan Darilek -O.S. Tezer -O.S. Tezer -Oh Jinkyun -Ouyang Liduo -Patrick Stapleton -Paul Liljenberg -Pavel Tikhomirov -Pawel Konczalski -Peter Choi -Peter Dave Hello -Peter Jaffe -Peter Nagy -Peter Waller -Phil Estes -Philip Alexander Etling -Philipp Gillé -Qiang Huang -Qiang Huang -Ray Tsang -Renaud Gaubert -Robert Terhaar -Roberto G. Hashioka -Roberto Muñoz Fernández -Roman Dudin -Ross Boucher -Runshen Zhu -Ryan Stelly -Sakeven Jiang -Sandeep Bansal -Sandeep Bansal -Sargun Dhillon -Sean Lee -Sebastiaan van Stijn -Sebastiaan van Stijn -Shaun Kaasten -Shawn Landden -Shengbo Song -Shengbo Song -Shih-Yuan Lee -Shishir Mahajan -Shukui Yang -Shuwei Hao -Shuwei Hao -Sidhartha Mani -Sjoerd Langkemper -Solomon Hykes -Solomon Hykes -Solomon Hykes -Soshi Katsuta -Soshi Katsuta -Sridhar Ratnakumar -Sridhar Ratnakumar -Srini Brahmaroutu -Srinivasan Srivatsan -Stefan Berger -Stefan Berger -Stefan J. Wernli -Stefan S. 
-Stephan Spindler -Stephen Day -Stephen Day -Steve Desmond -Sun Gengze <690388648@qq.com> -Sun Jianbo -Sun Jianbo -Sven Dowideit -Sven Dowideit -Sven Dowideit -Sven Dowideit -Sven Dowideit -Sven Dowideit -Sven Dowideit <¨SvenDowideit@home.org.au¨> -Sylvain Bellemare -Sylvain Bellemare -Tangi Colin -Tejesh Mehta -Thatcher Peskens -Thatcher Peskens -Thatcher Peskens -Thomas Gazagnaire -Thomas Léveil -Thomas Léveil -Tibor Vass -Tibor Vass -Tim Bart -Tim Bosse -Tim Ruffles -Tim Terhorst -Tim Zju <21651152@zju.edu.cn> -Timothy Hobbs -Toli Kuznets -Tom Barlow -Tom Sweeney -Tõnis Tiigi -Trishna Guha -Tristan Carel -Tristan Carel -Umesh Yadav -Umesh Yadav -Victor Lyuboslavsky -Victor Vieux -Victor Vieux -Victor Vieux -Victor Vieux -Victor Vieux -Victor Vieux -Viktor Vojnovski -Vincent Batts -Vincent Bernat -Vincent Bernat -Vincent Demeester -Vincent Demeester -Vincent Demeester -Vishnu Kannan -Vladimir Rutsky -Walter Stanish -Wang Chao -Wang Chao -Wang Guoliang -Wang Jie -Wang Ping -Wang Xing -Wang Yuexiao -Wayne Chang -Wayne Song -Wei Wu cizixs -Wenjun Tang -Wewang Xiaorenfine -Will Weaver -Xianglin Gao -Xianlu Bird -Xiaoyu Zhang -Xuecong Liao -Yamasaki Masahide -Yao Zaiyong -Yassine Tijani -Yazhong Liu -Yestin Sun -Yi EungJun -Ying Li -Ying Li -Yong Tang -Yosef Fertel -Yu Changchun -Yu Chengxia -Yu Peng -Yu Peng -Zachary Jaffee -Zachary Jaffee -ZhangHang -Zhenkun Bi -Zhou Hao -Zhu Kunjia -Zou Yu diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index c5dafd72..c6c8fb40 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -39,7 +39,7 @@ Ahmed Kamal Ahmet Alp Balkan Aidan Feldman Aidan Hobson Sayers -AJ Bowen +AJ Bowen Ajey Charantimath ajneu Akash Gupta @@ -54,6 +54,7 @@ Alan Scherger Alan Thompson Albert Callarisa Albert Zhang +Alejandro González Hevia Aleksa Sarai Aleksandrs Fadins Alena Prokharchyk @@ -65,6 +66,7 @@ Alex Coventry Alex Crawford Alex Ellis Alex Gaynor +Alex Goodman Alex Olshansky Alex Samorukov Alex Warhawk @@ -77,6 +79,7 @@ Alexander Shopov Alexandre Beslic Alexandre Garnier Alexandre González +Alexandre Jomin Alexandru Sfirlogea Alexey Guskov Alexey Kotlyarov @@ -98,11 +101,13 @@ Amir Goldstein Amit Bakshi Amit Krishnan Amit Shukla +Amr Gawish Amy Lindburg Anand Patil AnandkumarPatel Anatoly Borodin Anchal Agrawal +Anda Xu Anders Janmyr Andre Dublin <81dublin@gmail.com> Andre Granovsky @@ -196,7 +201,9 @@ Ben Severson Ben Toews Ben Wiklund Benjamin Atkin +Benjamin Baker Benjamin Boudreau +Benjamin Yolken Benoit Chesneau Bernerd Schaefer Bernhard M. Wiedemann @@ -240,6 +247,7 @@ Brian Torres-Gil Brian Trump Brice Jaglin Briehan Lombaard +Brielle Broder Bruno Bigras Bruno Binet Bruno Gazzera @@ -267,6 +275,7 @@ Carlos Sanchez Carol Fager-Higgins Cary Casey Bisson +Catalin Pirvu Ce Gao Cedric Davies Cezar Sa Espinola @@ -293,6 +302,8 @@ Chen Min Chen Mingjie Chen Qiu Cheng-mean Liu +Chengguang Xu +chenyuzhu Chetan Birajdar Chewey Chia-liang Kao @@ -313,11 +324,14 @@ Chris Snow Chris St. Pierre Chris Stivers Chris Swan +Chris Telfer Chris Wahl Chris Weyl +Chris White Christian Berendt Christian Brauner Christian Böhme +Christian Muehlhaeuser Christian Persson Christian Rotzoll Christian Simon @@ -336,6 +350,7 @@ Chun Chen Ciro S. 
Costa Clayton Coleman Clinton Kitson +Cody Roseborough Coenraad Loubser Colin Dunklau Colin Hebert @@ -411,6 +426,7 @@ Dave MacDonald Dave Tucker David Anderson David Calavera +David Chung David Corking David Cramer David Currie @@ -432,6 +448,7 @@ David Röthlisberger David Sheets David Sissitka David Trott +David Wang <00107082@163.com> David Williamson David Xia David Young @@ -439,6 +456,7 @@ Davide Ceretti Dawn Chen dbdd dcylabs +Debayan De Deborah Gertrude Digges deed02392 Deng Guangxing @@ -491,6 +509,7 @@ Don Kjer Don Spaulding Donald Huang Dong Chen +Donghwa Kim Donovan Jones Doron Podoleanu Doug Davis @@ -510,6 +529,7 @@ Eike Herzbach Eivin Giske Skaaren Eivind Uggedal Elan Ruusamäe +Elango Sivanandam Elena Morozova Eli Uriegas Elias Faxö @@ -548,6 +568,7 @@ Erik St. Martin Erik Weathers Erno Hopearuoho Erwin van der Koogh +Ethan Bell Euan Kemp Eugen Krizo Eugene Yakubovich @@ -566,6 +587,7 @@ Eystein Måløy Stenberg ezbercih Ezra Silvera Fabian Lauer +Fabian Raetz Fabiano Rosas Fabio Falci Fabio Kung @@ -575,7 +597,9 @@ Fabrizio Regini Fabrizio Soppelsa Faiz Khan falmp +Fangming Fang Fangyuan Gao <21551127@zju.edu.cn> +fanjiyun Fareed Dudhia Fathi Boudra Federico Gimenez @@ -606,6 +630,7 @@ Florin Patan fonglh Foysal Iqbal Francesc Campoy +Francesco Mari Francis Chuang Francisco Carriedo Francisco Souza @@ -638,6 +663,7 @@ Gaël PORTAY Genki Takiuchi GennadySpb Geoffrey Bachelet +Geon Kim George Kontridze George MacRorie George Xie @@ -661,6 +687,7 @@ Gopikannan Venugopalsamy Gosuke Miyashita Gou Rao Govinda Fichtner +Grant Millar Grant Reaber Graydon Hoare Greg Fausak @@ -673,11 +700,13 @@ Guilherme Salgado Guillaume Dufour Guillaume J. Charmes guoxiuyan +Guri Gurjeet Singh Guruprasad Gustav Sinder gwx296173 Günter Zöchbauer +haikuoliu Hakan Özler Hans Kristian Flaatten Hans Rødtang @@ -696,6 +725,7 @@ heartlock <21521209@zju.edu.cn> Hector Castro Helen Xie Henning Sprang +Hiroshi Hatake Hobofan Hollie Teal Hong Xu @@ -718,6 +748,7 @@ Ian Bishop Ian Bull Ian Calvert Ian Campbell +Ian Chen Ian Lee Ian Main Ian Philpot @@ -738,6 +769,7 @@ Ingo Gottwald Isaac Dupree Isabel Jimenez Isao Jonas +Iskander Sharipov Ivan Babrou Ivan Fraixedes Ivan Grcic @@ -808,6 +840,7 @@ Jean-Pierre Huynh Jean-Tiare Le Bigot Jeeva S. Chelladhurai Jeff Anderson +Jeff Hajewski Jeff Johnston Jeff Lindsay Jeff Mickey @@ -829,7 +862,7 @@ Jeroen Franse Jeroen Jacobs Jesse Dearing Jesse Dubay -Jessica Frazelle +Jessica Frazelle Jezeniel Zapanta Jhon Honce Ji.Zhilong @@ -893,6 +926,7 @@ Jonas Pfenniger Jonathan A. 
Sternberg Jonathan Boulle Jonathan Camp +Jonathan Choy Jonathan Dowland Jonathan Lebon Jonathan Lomas @@ -962,7 +996,9 @@ Kareem Khazem kargakis Karl Grzeszczak Karol Duleba +Karthik Karanth Karthik Nayak +Kasper Fabæch Brandt Kate Heddleston Katie McLaughlin Kato Kazuyoshi @@ -970,6 +1006,7 @@ Katrina Owen Kawsar Saiyeed Kay Yan kayrus +Kazuhiro Sera Ke Li Ke Xu Kei Ohmura @@ -978,6 +1015,7 @@ Keli Hu Ken Cochrane Ken Herner Ken ICHIKAWA +Ken Reese Kenfe-Mickaël Laventure Kenjiro Nakayama Kent Johnson @@ -1015,9 +1053,9 @@ Krasimir Georgiev Kris-Mikael Krister Kristian Haugene Kristina Zabunova -krrg Kun Zhang Kunal Kushwaha +Kunal Tyagi Kyle Conroy Kyle Linden kyu @@ -1040,6 +1078,7 @@ Leandro Siqueira Lee Chao <932819864@qq.com> Lee, Meng-Han leeplay +Lei Gong Lei Jitang Len Weincier Lennie @@ -1075,6 +1114,7 @@ Lokesh Mandvekar longliqiang88 <394564827@qq.com> Lorenz Leutgeb Lorenzo Fontana +Lotus Fenn Louis Opter Luca Favatella Luca Marturana @@ -1108,6 +1148,7 @@ Manfred Zabarauskas Manjunath A Kumatagi Mansi Nahar Manuel Meurer +Manuel Rüger Manuel Woelker mapk0y Marc Abramowitz @@ -1146,13 +1187,16 @@ Martijn van Oosterhout Martin Honermeyer Martin Kelly Martin Mosegaard Amdisen +Martin Muzatko Martin Redmond Mary Anthony Masahito Zembutsu +Masato Ohba Masayuki Morita Mason Malone Mateusz Sulima Mathias Monnerville +Mathieu Champlon Mathieu Le Marec - Pasquet Mathieu Parent Matt Apperson @@ -1209,6 +1253,7 @@ Michael Huettermann Michael Irwin Michael Käufl Michael Neale +Michael Nussbaum Michael Prokop Michael Scharf Michael Spetsiotis @@ -1223,8 +1268,10 @@ Michal Minář Michal Wieczorek Michaël Pailloncy Michał Czeraszkiewicz -Michiel@unhosted -Mickaël FORTUNATO +Michał Gryko +Michiel de Jong +Mickaël Fortunato +Mickaël Remars Miguel Angel Fernández Miguel Morales Mihai Borobocea @@ -1239,6 +1286,7 @@ Mike Estes Mike Gaffney Mike Goelzer Mike Leone +Mike Lundy Mike MacCana Mike Naberezny Mike Snitzer @@ -1297,6 +1345,7 @@ Niall O'Higgins Nicholas E. Rabenau Nick DeCoursin Nick Irvine +Nick Neisen Nick Parker Nick Payne Nick Russo @@ -1310,6 +1359,7 @@ Nicolas Dudebout Nicolas Goy Nicolas Kaiser Nicolas Sterchele +Nicolas V Castet Nicolás Hock Isaza Nigel Poulton Nik Nyby @@ -1322,6 +1372,7 @@ Nishant Totla NIWA Hideyuki Noah Meyerhans Noah Treuhaft +NobodyOnSE noducks Nolan Darilek nponeccop @@ -1329,7 +1380,6 @@ Nuutti Kotivuori nzwsch O.S. Tezer objectified -odk- Oguz Bilgic Oh Jinkyun Ohad Schneider @@ -1352,6 +1402,7 @@ Patrick Böänziger Patrick Devine Patrick Hemmer Patrick Stapleton +Patrik Cyvoct pattichen Paul paul @@ -1423,6 +1474,8 @@ Pradip Dhara Prasanna Gautam Pratik Karki Prayag Verma +Priya Wadhwa +Projjol Banerji Przemek Hejman Pure White pysqz @@ -1517,6 +1570,7 @@ Rozhnov Alexandr Rudolph Gottesheim Rui Lopes Runshen Zhu +Russ Magee Ryan Abrams Ryan Anderson Ryan Aslett @@ -1543,6 +1597,7 @@ Sachin Joshi Sagar Hani Sainath Grandhi Sakeven Jiang +Salahuddin Khan Sally O'Malley Sam Abed Sam Alba @@ -1591,6 +1646,7 @@ Sergey Alekseev Sergey Evstifeev Sergii Kabashniuk Serhat Gülçiçek +SeungUkLee Sevki Hasirci Shane Canon Shane da Silva @@ -1685,10 +1741,11 @@ tang0th Tangi Colin Tatsuki Sugiura Tatsushi Inagaki +Taylan Isikdemir Taylor Jones -tbonza Ted M. 
Young Tehmasp Chaudhri +Tejaswini Duggaraju Tejesh Mehta terryding77 <550147740@qq.com> tgic @@ -1782,6 +1839,7 @@ Tristan Carel Troy Denton Tycho Andersen Tyler Brock +Tyler Brown Tzu-Jung Lee uhayate Ulysse Carion @@ -1838,11 +1896,13 @@ Wang Xing Wang Yuexiao Ward Vandewege WarheadsSE +Wassim Dhif Wayne Chang Wayne Song Weerasak Chongnguluam Wei Wu Wei-Ting Kuo +weipeng weiyan Weiyang Zhu Wen Cheng Ma @@ -1875,11 +1935,13 @@ XiaoBing Jiang Xiaoxu Chen Xiaoyu Zhang xiekeyang +Ximo Guanter Gonzálbez Xinbo Weng Xinzi Zhou Xiuming Chen Xuecong Liao xuzhaokui +Yadnyawalkya Tale Yahya YAMADA Tsuyoshi Yamasaki Masahide @@ -1910,6 +1972,7 @@ Yu-Ju Hong Yuan Sun Yuanhong Peng Yuhao Fang +Yuichiro Kaneko Yunxiang Huang Yurii Rashkovskii Yves Junqueira @@ -1948,5 +2011,6 @@ Zunayed Ali Átila Camurça Alves 尹吉峰 徐俊杰 +慕陶 搏通 黄艳红00139573 diff --git a/vendor/github.com/docker/docker/CHANGELOG.md b/vendor/github.com/docker/docker/CHANGELOG.md deleted file mode 100644 index e9db48fa..00000000 --- a/vendor/github.com/docker/docker/CHANGELOG.md +++ /dev/null @@ -1,3587 +0,0 @@ -# Changelog - -Items starting with `DEPRECATE` are important deprecation notices. For more -information on the list of deprecated flags and APIs please have a look at -https://docs.docker.com/engine/deprecated/ where target removal dates can also -be found. - -## 17.05.0-ce (2017-05-04) - -### Builder - -+ Add multi-stage build support [#31257](https://github.com/docker/docker/pull/31257) [#32063](https://github.com/docker/docker/pull/32063) -+ Allow using build-time args (`ARG`) in `FROM` [#31352](https://github.com/docker/docker/pull/31352) -+ Add an option for specifying build target [#32496](https://github.com/docker/docker/pull/32496) -* Accept `-f -` to read Dockerfile from `stdin`, but use local context for building [#31236](https://github.com/docker/docker/pull/31236) -* The values of default build time arguments (e.g `HTTP_PROXY`) are no longer displayed in docker image history unless a corresponding `ARG` instruction is written in the Dockerfile. 
[#31584](https://github.com/docker/docker/pull/31584) -- Fix setting command if a custom shell is used in a parent image [#32236](https://github.com/docker/docker/pull/32236) -- Fix `docker build --label` when the label includes single quotes and a space [#31750](https://github.com/docker/docker/pull/31750) - -### Client - -* Add `--mount` flag to `docker run` and `docker create` [#32251](https://github.com/docker/docker/pull/32251) -* Add `--type=secret` to `docker inspect` [#32124](https://github.com/docker/docker/pull/32124) -* Add `--format` option to `docker secret ls` [#31552](https://github.com/docker/docker/pull/31552) -* Add `--filter` option to `docker secret ls` [#30810](https://github.com/docker/docker/pull/30810) -* Add `--filter scope=` to `docker network ls` [#31529](https://github.com/docker/docker/pull/31529) -* Add `--cpus` support to `docker update` [#31148](https://github.com/docker/docker/pull/31148) -* Add label filter to `docker system prune` and other `prune` commands [#30740](https://github.com/docker/docker/pull/30740) -* `docker stack rm` now accepts multiple stacks as input [#32110](https://github.com/docker/docker/pull/32110) -* Improve `docker version --format` option when the client has downgraded the API version [#31022](https://github.com/docker/docker/pull/31022) -* Prompt when using an encrypted client certificate to connect to a docker daemon [#31364](https://github.com/docker/docker/pull/31364) -* Display created tags on successful `docker build` [#32077](https://github.com/docker/docker/pull/32077) -* Cleanup compose convert error messages [#32087](https://github.com/moby/moby/pull/32087) - -### Contrib - -+ Add support for building docker debs for Ubuntu 17.04 Zesty on amd64 [#32435](https://github.com/docker/docker/pull/32435) - -### Daemon - -- Fix `--api-cors-header` being ignored if `--api-enable-cors` is not set [#32174](https://github.com/docker/docker/pull/32174) -- Cleanup docker tmp dir on start [#31741](https://github.com/docker/docker/pull/31741) -- Deprecate `--graph` flag in favor or `--data-root` [#28696](https://github.com/docker/docker/pull/28696) - -### Logging - -+ Add support for logging driver plugins [#28403](https://github.com/docker/docker/pull/28403) -* Add support for showing logs of individual tasks to `docker service logs`, and add `/task/{id}/logs` REST endpoint [#32015](https://github.com/docker/docker/pull/32015) -* Add `--log-opt env-regex` option to match environment variables using a regular expression [#27565](https://github.com/docker/docker/pull/27565) - -### Networking - -+ Allow user to replace, and customize the ingress network [#31714](https://github.com/docker/docker/pull/31714) -- Fix UDP traffic in containers not working after the container is restarted [#32505](https://github.com/docker/docker/pull/32505) -- Fix files being written to `/var/lib/docker` if a different data-root is set [#32505](https://github.com/docker/docker/pull/32505) - -### Runtime - -- Ensure health probe is stopped when a container exits [#32274](https://github.com/docker/docker/pull/32274) - -### Swarm Mode - -+ Add update/rollback order for services (`--update-order` / `--rollback-order`) [#30261](https://github.com/docker/docker/pull/30261) -+ Add support for synchronous `service create` and `service update` [#31144](https://github.com/docker/docker/pull/31144) -+ Add support for "grace periods" on healthchecks through the `HEALTHCHECK --start-period` and `--health-start-period` flag to - `docker service create`, `docker service 
update`, `docker create`, and `docker run` to support containers with an initial startup - time [#28938](https://github.com/docker/docker/pull/28938) -* `docker service create` now omits fields that are not specified by the user, when possible. This will allow defaults to be applied inside the manager [#32284](https://github.com/docker/docker/pull/32284) -* `docker service inspect` now shows default values for fields that are not specified by the user [#32284](https://github.com/docker/docker/pull/32284) -* Move `docker service logs` out of experimental [#32462](https://github.com/docker/docker/pull/32462) -* Add support for Credential Spec and SELinux to services to the API [#32339](https://github.com/docker/docker/pull/32339) -* Add `--entrypoint` flag to `docker service create` and `docker service update` [#29228](https://github.com/docker/docker/pull/29228) -* Add `--network-add` and `--network-rm` to `docker service update` [#32062](https://github.com/docker/docker/pull/32062) -* Add `--credential-spec` flag to `docker service create` and `docker service update` [#32339](https://github.com/docker/docker/pull/32339) -* Add `--filter mode=` to `docker service ls` [#31538](https://github.com/docker/docker/pull/31538) -* Resolve network IDs on the client side, instead of in the daemon when creating services [#32062](https://github.com/docker/docker/pull/32062) -* Add `--format` option to `docker node ls` [#30424](https://github.com/docker/docker/pull/30424) -* Add `--prune` option to `docker stack deploy` to remove services that are no longer defined in the docker-compose file [#31302](https://github.com/docker/docker/pull/31302) -* Add `PORTS` column for `docker service ls` when using `ingress` mode [#30813](https://github.com/docker/docker/pull/30813) -- Fix unnescessary re-deploying of tasks when environment-variables are used [#32364](https://github.com/docker/docker/pull/32364) -- Fix `docker stack deploy` not supporting `endpoint_mode` when deploying from a docker compose file [#32333](https://github.com/docker/docker/pull/32333) -- Proceed with startup if cluster component cannot be created to allow recovering from a broken swarm setup [#31631](https://github.com/docker/docker/pull/31631) - -### Security - -* Allow setting SELinux type or MCS labels when using `--ipc=container:` or `--ipc=host` [#30652](https://github.com/docker/docker/pull/30652) - - -### Deprecation - -- Deprecate `--api-enable-cors` daemon flag. This flag was marked deprecated in Docker 1.6.0 but not listed in deprecated features [#32352](https://github.com/docker/docker/pull/32352) -- Remove Ubuntu 12.04 (Precise Pangolin) as supported platform. 
Ubuntu 12.04 is EOL, and no longer receives updates [#32520](https://github.com/docker/docker/pull/32520) - -## 17.04.0-ce (2017-04-05) - -### Builder - -* Disable container logging for build containers [#29552](https://github.com/docker/docker/pull/29552) -* Fix use of `**/` in `.dockerignore` [#29043](https://github.com/docker/docker/pull/29043) - -### Client - -+ Sort `docker stack ls` by name [#31085](https://github.com/docker/docker/pull/31085) -+ Flags for specifying bind mount consistency [#31047](https://github.com/docker/docker/pull/31047) -* Output of docker CLI --help is now wrapped to the terminal width [#28751](https://github.com/docker/docker/pull/28751) -* Suppress image digest in docker ps [#30848](https://github.com/docker/docker/pull/30848) -* Hide command options that are related to Windows [#30788](https://github.com/docker/docker/pull/30788) -* Fix `docker plugin install` prompt to accept "enter" for the "N" default [#30769](https://github.com/docker/docker/pull/30769) -+ Add `truncate` function for Go templates [#30484](https://github.com/docker/docker/pull/30484) -* Support expanded syntax of ports in `stack deploy` [#30476](https://github.com/docker/docker/pull/30476) -* Support expanded syntax of mounts in `stack deploy` [#30597](https://github.com/docker/docker/pull/30597) [#31795](https://github.com/docker/docker/pull/31795) -+ Add `--add-host` for docker build [#30383](https://github.com/docker/docker/pull/30383) -+ Add `.CreatedAt` placeholder for `docker network ls --format` [#29900](https://github.com/docker/docker/pull/29900) -* Update order of `--secret-rm` and `--secret-add` [#29802](https://github.com/docker/docker/pull/29802) -+ Add `--filter enabled=true` for `docker plugin ls` [#28627](https://github.com/docker/docker/pull/28627) -+ Add `--format` to `docker service ls` [#28199](https://github.com/docker/docker/pull/28199) -+ Add `publish` and `expose` filter for `docker ps --filter` [#27557](https://github.com/docker/docker/pull/27557) -* Support multiple service IDs on `docker service ps` [#25234](https://github.com/docker/docker/pull/25234) -+ Allow swarm join with `--availability=drain` [#24993](https://github.com/docker/docker/pull/24993) -* Docker inspect now shows "docker-default" when AppArmor is enabled and no other profile was defined [#27083](https://github.com/docker/docker/pull/27083) - -### Logging - -+ Implement optional ring buffer for container logs [#28762](https://github.com/docker/docker/pull/28762) -+ Add `--log-opt awslogs-create-group=` for awslogs (CloudWatch) to support creation of log groups as needed [#29504](https://github.com/docker/docker/pull/29504) -- Fix segfault when using the gcplogs logging driver with a "static" binary [#29478](https://github.com/docker/docker/pull/29478) - - -### Networking - -* Check parameter `--ip`, `--ip6` and `--link-local-ip` in `docker network connect` [#30807](https://github.com/docker/docker/pull/30807) -+ Added support for `dns-search` [#30117](https://github.com/docker/docker/pull/30117) -+ Added --verbose option for docker network inspect to show task details from all swarm nodes [#31710](https://github.com/docker/docker/pull/31710) -* Clear stale datapath encryption states when joining the cluster [docker/libnetwork#1354](https://github.com/docker/libnetwork/pull/1354) -+ Ensure iptables initialization only happens once [docker/libnetwork#1676](https://github.com/docker/libnetwork/pull/1676) -* Fix bad order of iptables filter rules 
[docker/libnetwork#961](https://github.com/docker/libnetwork/pull/961) -+ Add anonymous container alias to service record on attachable network [docker/libnetwork#1651](https://github.com/docker/libnetwork/pull/1651) -+ Support for `com.docker.network.container_interface_prefix` driver label [docker/libnetwork#1667](https://github.com/docker/libnetwork/pull/1667) -+ Improve network list performance by omitting network details that are not used [#30673](https://github.com/docker/docker/pull/30673) - -### Runtime - -* Handle paused container when restoring without live-restore set [#31704](https://github.com/docker/docker/pull/31704) -- Do not allow sub second in healthcheck options in Dockerfile [#31177](https://github.com/docker/docker/pull/31177) -* Support name and id prefix in `secret update` [#30856](https://github.com/docker/docker/pull/30856) -* Use binary frame for websocket attach endpoint [#30460](https://github.com/docker/docker/pull/30460) -* Fix linux mount calls not applying propagation type changes [#30416](https://github.com/docker/docker/pull/30416) -* Fix ExecIds leak on failed `exec -i` [#30340](https://github.com/docker/docker/pull/30340) -* Prune named but untagged images if `danglingOnly=true` [#30330](https://github.com/docker/docker/pull/30330) -+ Add daemon flag to set `no_new_priv` as default for unprivileged containers [#29984](https://github.com/docker/docker/pull/29984) -+ Add daemon option `--default-shm-size` [#29692](https://github.com/docker/docker/pull/29692) -+ Support registry mirror config reload [#29650](https://github.com/docker/docker/pull/29650) -- Ignore the daemon log config when building images [#29552](https://github.com/docker/docker/pull/29552) -* Move secret name or ID prefix resolving from client to daemon [#29218](https://github.com/docker/docker/pull/29218) -+ Allow adding rules to `cgroup devices.allow` on container create/run [#22563](https://github.com/docker/docker/pull/22563) -- Fix `cpu.cfs_quota_us` being reset when running `systemd daemon-reload` [#31736](https://github.com/docker/docker/pull/31736) - -### Swarm Mode - -+ Topology-aware scheduling [#30725](https://github.com/docker/docker/pull/30725) -+ Automatic service rollback on failure [#31108](https://github.com/docker/docker/pull/31108) -+ Worker and manager on the same node are now connected through a UNIX socket [docker/swarmkit#1828](https://github.com/docker/swarmkit/pull/1828), [docker/swarmkit#1850](https://github.com/docker/swarmkit/pull/1850), [docker/swarmkit#1851](https://github.com/docker/swarmkit/pull/1851) -* Improve raft transport package [docker/swarmkit#1748](https://github.com/docker/swarmkit/pull/1748) -* No automatic manager shutdown on demotion/removal [docker/swarmkit#1829](https://github.com/docker/swarmkit/pull/1829) -* Use TransferLeadership to make leader demotion safer [docker/swarmkit#1939](https://github.com/docker/swarmkit/pull/1939) -* Decrease default monitoring period [docker/swarmkit#1967](https://github.com/docker/swarmkit/pull/1967) -+ Add Service logs formatting [#31672](https://github.com/docker/docker/pull/31672) -* Fix service logs API to be able to specify stream [#31313](https://github.com/docker/docker/pull/31313) -+ Add `--stop-signal` for `service create` and `service update` [#30754](https://github.com/docker/docker/pull/30754) -+ Add `--read-only` for `service create` and `service update` [#30162](https://github.com/docker/docker/pull/30162) -+ Renew the context after communicating with the registry 
[#31586](https://github.com/docker/docker/pull/31586) -+ (experimental) Add `--tail` and `--since` options to `docker service logs` [#31500](https://github.com/docker/docker/pull/31500) -+ (experimental) Add `--no-task-ids` and `--no-trunc` options to `docker service logs` [#31672](https://github.com/docker/docker/pull/31672) - -### Windows - -* Block pulling Windows images on non-Windows daemons [#29001](https://github.com/docker/docker/pull/29001) - -## 17.03.1-ce (2017-03-27) - -### Remote API (v1.27) & Client - -* Fix autoremove on older api [#31692](https://github.com/docker/docker/pull/31692) -* Fix default network customization for a stack [#31258](https://github.com/docker/docker/pull/31258/) -* Correct CPU usage calculation in presence of offline CPUs and newer Linux [#31802](https://github.com/docker/docker/pull/31802) -* Fix issue where service healthcheck is `{}` in remote API [#30197](https://github.com/docker/docker/pull/30197) - -### Runtime - -* Update runc to 54296cf40ad8143b62dbcaa1d90e520a2136ddfe [#31666](https://github.com/docker/docker/pull/31666) - * Ignore cgroup2 mountpoints [opencontainers/runc#1266](https://github.com/opencontainers/runc/pull/1266) -* Update containerd to 4ab9917febca54791c5f071a9d1f404867857fcc [#31662](https://github.com/docker/docker/pull/31662) [#31852](https://github.com/docker/docker/pull/31852) - * Register healthcheck service before calling restore() [docker/containerd#609](https://github.com/docker/containerd/pull/609) -* Fix `docker exec` not working after unattended upgrades that reload apparmor profiles [#31773](https://github.com/docker/docker/pull/31773) -* Fix unmounting layer without merge dir with Overlay2 [#31069](https://github.com/docker/docker/pull/31069) -* Do not ignore "volume in use" errors when force-delete [#31450](https://github.com/docker/docker/pull/31450) - -### Swarm Mode - -* Update swarmkit to 17756457ad6dc4d8a639a1f0b7a85d1b65a617bb [#31807](https://github.com/docker/docker/pull/31807) - * Scheduler now correctly considers tasks which have been assigned to a node but aren't yet running [docker/swarmkit#1980](https://github.com/docker/swarmkit/pull/1980) - * Allow removal of a network when only dead tasks reference it [docker/swarmkit#2018](https://github.com/docker/swarmkit/pull/2018) - * Retry failed network allocations less aggressively [docker/swarmkit#2021](https://github.com/docker/swarmkit/pull/2021) - * Avoid network allocation for tasks that are no longer running [docker/swarmkit#2017](https://github.com/docker/swarmkit/pull/2017) - * Bookkeeping fixes inside network allocator allocator [docker/swarmkit#2019](https://github.com/docker/swarmkit/pull/2019) [docker/swarmkit#2020](https://github.com/docker/swarmkit/pull/2020) - -### Windows - -* Cleanup HCS on restore [#31503](https://github.com/docker/docker/pull/31503) - -## 17.03.0-ce (2017-03-01) - -**IMPORTANT**: Starting with this release, Docker is on a monthly release cycle and uses a -new YY.MM versioning scheme to reflect this. Two channels are available: monthly and quarterly. -Any given monthly release will only receive security and bugfixes until the next monthly -release is available. Quarterly releases receive security and bugfixes for 4 months after -initial release. This release includes bugfixes for 1.13.1 but -there are no major feature additions and the API version stays the same. -Upgrading from Docker 1.13.1 to 17.03.0 is expected to be simple and low-risk. 
- -### Client - -* Fix panic in `docker stats --format` [#30776](https://github.com/docker/docker/pull/30776) - -### Contrib - -* Update various `bash` and `zsh` completion scripts [#30823](https://github.com/docker/docker/pull/30823), [#30945](https://github.com/docker/docker/pull/30945) and more... -* Block obsolete socket families in default seccomp profile - mitigates unpatched kernels' CVE-2017-6074 [#29076](https://github.com/docker/docker/pull/29076) - -### Networking - -* Fix bug on overlay encryption keys rotation in cross-datacenter swarm [#30727](https://github.com/docker/docker/pull/30727) -* Fix side effect panic in overlay encryption and network control plane communication failure ("No installed keys could decrypt the message") on frequent swarm leader re-election [#25608](https://github.com/docker/docker/pull/25608) -* Several fixes around system responsiveness and datapath programming when using overlay network with external kv-store [docker/libnetwork#1639](https://github.com/docker/libnetwork/pull/1639), [docker/libnetwork#1632](https://github.com/docker/libnetwork/pull/1632) and more... -* Discard incoming plain vxlan packets for encrypted overlay network [#31170](https://github.com/docker/docker/pull/31170) -* Release the network attachment on allocation failure [#31073](https://github.com/docker/docker/pull/31073) -* Fix port allocation when multiple published ports map to the same target port [docker/swarmkit#1835](https://github.com/docker/swarmkit/pull/1835) - -### Runtime - -* Fix a deadlock in docker logs [#30223](https://github.com/docker/docker/pull/30223) -* Fix cpu spin waiting for log write events [#31070](https://github.com/docker/docker/pull/31070) -* Fix a possible crash when using journald [#31231](https://github.com/docker/docker/pull/31231) [#31263](https://github.com/docker/docker/pull/31263) -* Fix a panic on close of nil channel [#31274](https://github.com/docker/docker/pull/31274) -* Fix duplicate mount point for `--volumes-from` in `docker run` [#29563](https://github.com/docker/docker/pull/29563) -* Fix `--cache-from` does not cache last step [#31189](https://github.com/docker/docker/pull/31189) - -### Swarm Mode - -* Shutdown leaks an error when the container was never started [#31279](https://github.com/docker/docker/pull/31279) -* Fix possibility of tasks getting stuck in the "NEW" state during a leader failover [docker/swarmkit#1938](https://github.com/docker/swarmkit/pull/1938) -* Fix extraneous task creations for global services that led to confusing replica counts in `docker service ls` [docker/swarmkit#1957](https://github.com/docker/swarmkit/pull/1957) -* Fix problem that made rolling updates slow when `task-history-limit` was set to 1 [docker/swarmkit#1948](https://github.com/docker/swarmkit/pull/1948) -* Restart tasks elsewhere, if appropriate, when they are shut down as a result of nodes no longer satisfying constraints [docker/swarmkit#1958](https://github.com/docker/swarmkit/pull/1958) -* (experimental) - -## 1.13.1 (2017-02-08) - -**IMPORTANT**: On Linux distributions where `devicemapper` was the default storage driver, -the `overlay2`, or `overlay` is now used by default (if the kernel supports it). -To use devicemapper, you can manually configure the storage driver to use through -the `--storage-driver` daemon option, or by setting "storage-driver" in the `daemon.json` -configuration file. - -**IMPORTANT**: In Docker 1.13, the managed plugin api changed, as compared to the experimental -version introduced in Docker 1.12. 
You must **uninstall** plugins which you installed with Docker 1.12 -_before_ upgrading to Docker 1.13. You can uninstall plugins using the `docker plugin rm` command. - -If you have already upgraded to Docker 1.13 without uninstalling -previously-installed plugins, you may see this message when the Docker daemon -starts: - - Error starting daemon: json: cannot unmarshal string into Go value of type types.PluginEnv - -To manually remove all plugins and resolve this problem, take the following steps: - -1. Remove plugins.json from: `/var/lib/docker/plugins/`. -2. Restart Docker. Verify that the Docker daemon starts with no errors. -3. Reinstall your plugins. - -### Contrib - -* Do not require a custom build of tini [#28454](https://github.com/docker/docker/pull/28454) -* Upgrade to Go 1.7.5 [#30489](https://github.com/docker/docker/pull/30489) - -### Remote API (v1.26) & Client - -+ Support secrets in docker stack deploy with compose file [#30144](https://github.com/docker/docker/pull/30144) - -### Runtime - -* Fix size issue in `docker system df` [#30378](https://github.com/docker/docker/pull/30378) -* Fix error on `docker inspect` when Swarm certificates were expired. [#29246](https://github.com/docker/docker/pull/29246) -* Fix deadlock on v1 plugin with activate error [#30408](https://github.com/docker/docker/pull/30408) -* Fix SELinux regression [#30649](https://github.com/docker/docker/pull/30649) - -### Plugins - -* Support global scoped network plugins (v2) in swarm mode [#30332](https://github.com/docker/docker/pull/30332) -+ Add `docker plugin upgrade` [#29414](https://github.com/docker/docker/pull/29414) - -### Windows - -* Fix small regression with old plugins in Windows [#30150](https://github.com/docker/docker/pull/30150) -* Fix warning on Windows [#30730](https://github.com/docker/docker/pull/30730) - -## 1.13.0 (2017-01-18) - -**IMPORTANT**: On Linux distributions where `devicemapper` was the default storage driver, -the `overlay2`, or `overlay` is now used by default (if the kernel supports it). -To use devicemapper, you can manually configure the storage driver to use through -the `--storage-driver` daemon option, or by setting "storage-driver" in the `daemon.json` -configuration file. - -**IMPORTANT**: In Docker 1.13, the managed plugin api changed, as compared to the experimental -version introduced in Docker 1.12. You must **uninstall** plugins which you installed with Docker 1.12 -_before_ upgrading to Docker 1.13. You can uninstall plugins using the `docker plugin rm` command. - -If you have already upgraded to Docker 1.13 without uninstalling -previously-installed plugins, you may see this message when the Docker daemon -starts: - - Error starting daemon: json: cannot unmarshal string into Go value of type types.PluginEnv - -To manually remove all plugins and resolve this problem, take the following steps: - -1. Remove plugins.json from: `/var/lib/docker/plugins/`. -2. Restart Docker. Verify that the Docker daemon starts with no errors. -3. Reinstall your plugins. - -### Builder - -+ Add capability to specify images used as a cache source on build. 
These images do not need to have local parent chain and can be pulled from other registries [#26839](https://github.com/docker/docker/pull/26839) -+ (experimental) Add option to squash image layers to the FROM image after successful builds [#22641](https://github.com/docker/docker/pull/22641) -* Fix dockerfile parser with empty line after escape [#24725](https://github.com/docker/docker/pull/24725) -- Add step number on `docker build` [#24978](https://github.com/docker/docker/pull/24978) -+ Add support for compressing build context during image build [#25837](https://github.com/docker/docker/pull/25837) -+ add `--network` to `docker build` [#27702](https://github.com/docker/docker/pull/27702) -- Fix inconsistent behavior between `--label` flag on `docker build` and `docker run` [#26027](https://github.com/docker/docker/issues/26027) -- Fix image layer inconsistencies when using the overlay storage driver [#27209](https://github.com/docker/docker/pull/27209) -* Unused build-args are now allowed. A warning is presented instead of an error and failed build [#27412](https://github.com/docker/docker/pull/27412) -- Fix builder cache issue on Windows [#27805](https://github.com/docker/docker/pull/27805) -+ Allow `USER` in builder on Windows [#28415](https://github.com/docker/docker/pull/28415) -+ Handle env case-insensitive on Windows [#28725](https://github.com/docker/docker/pull/28725) - -### Contrib - -+ Add support for building docker debs for Ubuntu 16.04 Xenial on PPC64LE [#23438](https://github.com/docker/docker/pull/23438) -+ Add support for building docker debs for Ubuntu 16.04 Xenial on s390x [#26104](https://github.com/docker/docker/pull/26104) -+ Add support for building docker debs for Ubuntu 16.10 Yakkety Yak on PPC64LE [#28046](https://github.com/docker/docker/pull/28046) -- Add RPM builder for VMWare Photon OS [#24116](https://github.com/docker/docker/pull/24116) -+ Add shell completions to tgz [#27735](https://github.com/docker/docker/pull/27735) -* Update the install script to allow using the mirror in China [#27005](https://github.com/docker/docker/pull/27005) -+ Add DEB builder for Ubuntu 16.10 Yakkety Yak [#27993](https://github.com/docker/docker/pull/27993) -+ Add RPM builder for Fedora 25 [#28222](https://github.com/docker/docker/pull/28222) -+ Add `make deb` support for aarch64 [#27625](https://github.com/docker/docker/pull/27625) - -### Distribution - -* Update notary dependency to 0.4.2 (full changelogs [here](https://github.com/docker/notary/releases/tag/v0.4.2)) [#27074](https://github.com/docker/docker/pull/27074) - - Support for compilation on windows [docker/notary#970](https://github.com/docker/notary/pull/970) - - Improved error messages for client authentication errors [docker/notary#972](https://github.com/docker/notary/pull/972) - - Support for finding keys that are anywhere in the `~/.docker/trust/private` directory, not just under `~/.docker/trust/private/root_keys` or `~/.docker/trust/private/tuf_keys` [docker/notary#981](https://github.com/docker/notary/pull/981) - - Previously, on any error updating, the client would fall back on the cache. Now we only do so if there is a network error or if the server is unavailable or missing the TUF data. Invalid TUF data will cause the update to fail - for example if there was an invalid root rotation. 
[docker/notary#982](https://github.com/docker/notary/pull/982) - - Improve root validation and yubikey debug logging [docker/notary#858](https://github.com/docker/notary/pull/858) [docker/notary#891](https://github.com/docker/notary/pull/891) - - Warn if certificates for root or delegations are near expiry [docker/notary#802](https://github.com/docker/notary/pull/802) - - Warn if role metadata is near expiry [docker/notary#786](https://github.com/docker/notary/pull/786) - - Fix passphrase retrieval attempt counting and terminal detection [docker/notary#906](https://github.com/docker/notary/pull/906) -- Avoid unnecessary blob uploads when different users push same layers to authenticated registry [#26564](https://github.com/docker/docker/pull/26564) -* Allow external storage for registry credentials [#26354](https://github.com/docker/docker/pull/26354) - -### Logging - -* Standardize the default logging tag value in all logging drivers [#22911](https://github.com/docker/docker/pull/22911) -- Improve performance and memory use when logging of long log lines [#22982](https://github.com/docker/docker/pull/22982) -+ Enable syslog driver for windows [#25736](https://github.com/docker/docker/pull/25736) -+ Add Logentries Driver [#27471](https://github.com/docker/docker/pull/27471) -+ Update of AWS log driver to support tags [#27707](https://github.com/docker/docker/pull/27707) -+ Unix socket support for fluentd [#26088](https://github.com/docker/docker/pull/26088) -* Enable fluentd logging driver on Windows [#28189](https://github.com/docker/docker/pull/28189) -- Sanitize docker labels when used as journald field names [#23725](https://github.com/docker/docker/pull/23725) -- Fix an issue where `docker logs --tail` returned less lines than expected [#28203](https://github.com/docker/docker/pull/28203) -- Splunk Logging Driver: performance and reliability improvements [#26207](https://github.com/docker/docker/pull/26207) -- Splunk Logging Driver: configurable formats and skip for verifying connection [#25786](https://github.com/docker/docker/pull/25786) - -### Networking - -+ Add `--attachable` network support to enable `docker run` to work in swarm-mode overlay network [#25962](https://github.com/docker/docker/pull/25962) -+ Add support for host port PublishMode in services using the `--publish` option in `docker service create` [#27917](https://github.com/docker/docker/pull/27917) and [#28943](https://github.com/docker/docker/pull/28943) -+ Add support for Windows server 2016 overlay network driver (requires upcoming ws2016 update) [#28182](https://github.com/docker/docker/pull/28182) -* Change the default `FORWARD` policy to `DROP` [#28257](https://github.com/docker/docker/pull/28257) -+ Add support for specifying static IP addresses for predefined network on windows [#22208](https://github.com/docker/docker/pull/22208) -- Fix `--publish` flag on `docker run` not working with IPv6 addresses [#27860](https://github.com/docker/docker/pull/27860) -- Fix inspect network show gateway with mask [#25564](https://github.com/docker/docker/pull/25564) -- Fix an issue where multiple addresses in a bridge may cause `--fixed-cidr` to not have the correct addresses [#26659](https://github.com/docker/docker/pull/26659) -+ Add creation timestamp to `docker network inspect` [#26130](https://github.com/docker/docker/pull/26130) -- Show peer nodes in `docker network inspect` for swarm overlay networks [#28078](https://github.com/docker/docker/pull/28078) -- Enable ping for service VIP address 
[#28019](https://github.com/docker/docker/pull/28019) - -### Plugins - -- Move plugins out of experimental [#28226](https://github.com/docker/docker/pull/28226) -- Add `--force` on `docker plugin remove` [#25096](https://github.com/docker/docker/pull/25096) -* Add support for dynamically reloading authorization plugins [#22770](https://github.com/docker/docker/pull/22770) -+ Add description in `docker plugin ls` [#25556](https://github.com/docker/docker/pull/25556) -+ Add `-f`/`--format` to `docker plugin inspect` [#25990](https://github.com/docker/docker/pull/25990) -+ Add `docker plugin create` command [#28164](https://github.com/docker/docker/pull/28164) -* Send request's TLS peer certificates to authorization plugins [#27383](https://github.com/docker/docker/pull/27383) -* Support for global-scoped network and ipam plugins in swarm-mode [#27287](https://github.com/docker/docker/pull/27287) -* Split `docker plugin install` into two API call `/privileges` and `/pull` [#28963](https://github.com/docker/docker/pull/28963) - -### Remote API (v1.25) & Client - -+ Support `docker stack deploy` from a Compose file [#27998](https://github.com/docker/docker/pull/27998) -+ (experimental) Implement checkpoint and restore [#22049](https://github.com/docker/docker/pull/22049) -+ Add `--format` flag to `docker info` [#23808](https://github.com/docker/docker/pull/23808) -* Remove `--name` from `docker volume create` [#23830](https://github.com/docker/docker/pull/23830) -+ Add `docker stack ls` [#23886](https://github.com/docker/docker/pull/23886) -+ Add a new `is-task` ps filter [#24411](https://github.com/docker/docker/pull/24411) -+ Add `--env-file` flag to `docker service create` [#24844](https://github.com/docker/docker/pull/24844) -+ Add `--format` on `docker stats` [#24987](https://github.com/docker/docker/pull/24987) -+ Make `docker node ps` default to `self` in swarm node [#25214](https://github.com/docker/docker/pull/25214) -+ Add `--group` in `docker service create` [#25317](https://github.com/docker/docker/pull/25317) -+ Add `--no-trunc` to service/node/stack ps output [#25337](https://github.com/docker/docker/pull/25337) -+ Add Logs to `ContainerAttachOptions` so go clients can request to retrieve container logs as part of the attach process [#26718](https://github.com/docker/docker/pull/26718) -+ Allow client to talk to an older server [#27745](https://github.com/docker/docker/pull/27745) -* Inform user client-side that a container removal is in progress [#26074](https://github.com/docker/docker/pull/26074) -+ Add `Isolation` to the /info endpoint [#26255](https://github.com/docker/docker/pull/26255) -+ Add `userns` to the /info endpoint [#27840](https://github.com/docker/docker/pull/27840) -- Do not allow more than one mode be requested at once in the services endpoint [#26643](https://github.com/docker/docker/pull/26643) -+ Add capability to /containers/create API to specify mounts in a more granular and safer way [#22373](https://github.com/docker/docker/pull/22373) -+ Add `--format` flag to `network ls` and `volume ls` [#23475](https://github.com/docker/docker/pull/23475) -* Allow the top-level `docker inspect` command to inspect any kind of resource [#23614](https://github.com/docker/docker/pull/23614) -+ Add --cpus flag to control cpu resources for `docker run` and `docker create`, and add `NanoCPUs` to `HostConfig` [#27958](https://github.com/docker/docker/pull/27958) -- Allow unsetting the `--entrypoint` in `docker run` or `docker create` 
[#23718](https://github.com/docker/docker/pull/23718) -* Restructure CLI commands by adding `docker image` and `docker container` commands for more consistency [#26025](https://github.com/docker/docker/pull/26025) -- Remove `COMMAND` column from `service ls` output [#28029](https://github.com/docker/docker/pull/28029) -+ Add `--format` to `docker events` [#26268](https://github.com/docker/docker/pull/26268) -* Allow specifying multiple nodes on `docker node ps` [#26299](https://github.com/docker/docker/pull/26299) -* Restrict fractional digits to 2 decimals in `docker images` output [#26303](https://github.com/docker/docker/pull/26303) -+ Add `--dns-option` to `docker run` [#28186](https://github.com/docker/docker/pull/28186) -+ Add Image ID to container commit event [#28128](https://github.com/docker/docker/pull/28128) -+ Add external binaries version to docker info [#27955](https://github.com/docker/docker/pull/27955) -+ Add information for `Manager Addresses` in the output of `docker info` [#28042](https://github.com/docker/docker/pull/28042) -+ Add a new reference filter for `docker images` [#27872](https://github.com/docker/docker/pull/27872) - -### Runtime - -+ Add `--experimental` daemon flag to enable experimental features, instead of shipping them in a separate build [#27223](https://github.com/docker/docker/pull/27223) -+ Add a `--shutdown-timeout` daemon flag to specify the default timeout (in seconds) to stop containers gracefully before daemon exit [#23036](https://github.com/docker/docker/pull/23036) -+ Add `--stop-timeout` to specify the timeout value (in seconds) for individual containers to stop [#22566](https://github.com/docker/docker/pull/22566) -+ Add a new daemon flag `--userland-proxy-path` to allow configuring the userland proxy instead of using the hardcoded `docker-proxy` from `$PATH` [#26882](https://github.com/docker/docker/pull/26882) -+ Add boolean flag `--init` on `dockerd` and on `docker run` to use [tini](https://github.com/krallin/tini) a zombie-reaping init process as PID 1 [#26061](https://github.com/docker/docker/pull/26061) [#28037](https://github.com/docker/docker/pull/28037) -+ Add a new daemon flag `--init-path` to allow configuring the path to the `docker-init` binary [#26941](https://github.com/docker/docker/pull/26941) -+ Add support for live reloading insecure registry in configuration [#22337](https://github.com/docker/docker/pull/22337) -+ Add support for storage-opt size on Windows daemons [#23391](https://github.com/docker/docker/pull/23391) -* Improve reliability of `docker run --rm` by moving it from the client to the daemon [#20848](https://github.com/docker/docker/pull/20848) -+ Add support for `--cpu-rt-period` and `--cpu-rt-runtime` flags, allowing containers to run real-time threads when `CONFIG_RT_GROUP_SCHED` is enabled in the kernel [#23430](https://github.com/docker/docker/pull/23430) -* Allow parallel stop, pause, unpause [#24761](https://github.com/docker/docker/pull/24761) / [#26778](https://github.com/docker/docker/pull/26778) -* Implement XFS quota for overlay2 [#24771](https://github.com/docker/docker/pull/24771) -- Fix partial/full filter issue in `service tasks --filter` [#24850](https://github.com/docker/docker/pull/24850) -- Allow engine to run inside a user namespace [#25672](https://github.com/docker/docker/pull/25672) -- Fix a race condition between device deferred removal and resume device, when using the devicemapper graphdriver [#23497](https://github.com/docker/docker/pull/23497) -- Add `docker stats` support in 
Windows [#25737](https://github.com/docker/docker/pull/25737) -- Allow using `--pid=host` and `--net=host` when `--userns=host` [#25771](https://github.com/docker/docker/pull/25771) -+ (experimental) Add metrics (Prometheus) output for basic `container`, `image`, and `daemon` operations [#25820](https://github.com/docker/docker/pull/25820) -- Fix issue in `docker stats` with `NetworkDisabled=true` [#25905](https://github.com/docker/docker/pull/25905) -+ Add `docker top` support in Windows [#25891](https://github.com/docker/docker/pull/25891) -+ Record pid of exec'd process [#27470](https://github.com/docker/docker/pull/27470) -+ Add support for looking up user/groups via `getent` [#27599](https://github.com/docker/docker/pull/27599) -+ Add new `docker system` command with `df` and `prune` subcommands for system resource management, as well as `docker {container,image,volume,network} prune` subcommands [#26108](https://github.com/docker/docker/pull/26108) [#27525](https://github.com/docker/docker/pull/27525) / [#27525](https://github.com/docker/docker/pull/27525) -- Fix an issue where containers could not be stopped or killed by setting xfs max_retries to 0 upon ENOSPC with devicemapper [#26212](https://github.com/docker/docker/pull/26212) -- Fix `docker cp` failing to copy to a container's volume dir on CentOS with devicemapper [#28047](https://github.com/docker/docker/pull/28047) -* Promote overlay(2) graphdriver [#27932](https://github.com/docker/docker/pull/27932) -+ Add `--seccomp-profile` daemon flag to specify a path to a seccomp profile that overrides the default [#26276](https://github.com/docker/docker/pull/26276) -- Fix ulimits in `docker inspect` when `--default-ulimit` is set on daemon [#26405](https://github.com/docker/docker/pull/26405) -- Add workaround for overlay issues during build in older kernels [#28138](https://github.com/docker/docker/pull/28138) -+ Add `TERM` environment variable on `docker exec -t` [#26461](https://github.com/docker/docker/pull/26461) -* Honor a container’s `--stop-signal` setting upon `docker kill` [#26464](https://github.com/docker/docker/pull/26464) - -### Swarm Mode - -+ Add secret management [#27794](https://github.com/docker/docker/pull/27794) -+ Add support for templating service options (hostname, mounts, and environment variables) [#28025](https://github.com/docker/docker/pull/28025) -* Display the endpoint mode in the output of `docker service inspect --pretty` [#26906](https://github.com/docker/docker/pull/26906) -* Make `docker service ps` output more bearable by shortening service IDs in task names [#28088](https://github.com/docker/docker/pull/28088) -* Make `docker node ps` default to the current node [#25214](https://github.com/docker/docker/pull/25214) -+ Add `--dns`, -`-dns-opt`, and `--dns-search` to service create. [#27567](https://github.com/docker/docker/pull/27567) -+ Add `--force` to `docker service update` [#27596](https://github.com/docker/docker/pull/27596) -+ Add `--health-*` and `--no-healthcheck` flags to `docker service create` and `docker service update` [#27369](https://github.com/docker/docker/pull/27369) -+ Add `-q` to `docker service ps` [#27654](https://github.com/docker/docker/pull/27654) -* Display number of global services in `docker service ls` [#27710](https://github.com/docker/docker/pull/27710) -- Remove `--name` flag from `docker service update`. 
This flag is only functional on `docker service create`, so was removed from the `update` command [#26988](https://github.com/docker/docker/pull/26988) -- Fix worker nodes failing to recover because of transient networking issues [#26646](https://github.com/docker/docker/issues/26646) -* Add support for health aware load balancing and DNS records [#27279](https://github.com/docker/docker/pull/27279) -+ Add `--hostname` to `docker service create` [#27857](https://github.com/docker/docker/pull/27857) -+ Add `--host` to `docker service create`, and `--host-add`, `--host-rm` to `docker service update` [#28031](https://github.com/docker/docker/pull/28031) -+ Add `--tty` flag to `docker service create`/`update` [#28076](https://github.com/docker/docker/pull/28076) -* Autodetect, store, and expose node IP address as seen by the manager [#27910](https://github.com/docker/docker/pull/27910) -* Encryption at rest of manager keys and raft data [#27967](https://github.com/docker/docker/pull/27967) -+ Add `--update-max-failure-ratio`, `--update-monitor` and `--rollback` flags to `docker service update` [#26421](https://github.com/docker/docker/pull/26421) -- Fix an issue with address autodiscovery on `docker swarm init` running inside a container [#26457](https://github.com/docker/docker/pull/26457) -+ (experimental) Add `docker service logs` command to view logs for a service [#28089](https://github.com/docker/docker/pull/28089) -+ Pin images by digest for `docker service create` and `update` [#28173](https://github.com/docker/docker/pull/28173) -* Add short (`-f`) flag for `docker node rm --force` and `docker swarm leave --force` [#28196](https://github.com/docker/docker/pull/28196) -+ Add options to customize Raft snapshots (`--max-snapshots`, `--snapshot-interval`) [#27997](https://github.com/docker/docker/pull/27997) -- Don't repull image if pinned by digest [#28265](https://github.com/docker/docker/pull/28265) -+ Swarm-mode support for Windows [#27838](https://github.com/docker/docker/pull/27838) -+ Allow hostname to be updated on service [#28771](https://github.com/docker/docker/pull/28771) -+ Support v2 plugins [#29433](https://github.com/docker/docker/pull/29433) -+ Add content trust for services [#29469](https://github.com/docker/docker/pull/29469) - -### Volume - -+ Add support for labels on volumes [#21270](https://github.com/docker/docker/pull/21270) -+ Add support for filtering volumes by label [#25628](https://github.com/docker/docker/pull/25628) -* Add a `--force` flag in `docker volume rm` to forcefully purge the data of the volume that has already been deleted [#23436](https://github.com/docker/docker/pull/23436) -* Enhance `docker volume inspect` to show all options used when creating the volume [#26671](https://github.com/docker/docker/pull/26671) -* Add support for local NFS volumes to resolve hostnames [#27329](https://github.com/docker/docker/pull/27329) - -### Security - -- Fix selinux labeling of volumes shared in a container [#23024](https://github.com/docker/docker/pull/23024) -- Prohibit `/sys/firmware/**` from being accessed with apparmor [#26618](https://github.com/docker/docker/pull/26618) - -### Deprecation - -- Marked the `docker daemon` command as deprecated. The daemon is moved to a separate binary (`dockerd`), and should be used instead [#26834](https://github.com/docker/docker/pull/26834) -- Deprecate unversioned API endpoints [#28208](https://github.com/docker/docker/pull/28208) -- Remove Ubuntu 15.10 (Wily Werewolf) as supported platform. 
Ubuntu 15.10 is EOL, and no longer receives updates [#27042](https://github.com/docker/docker/pull/27042) -- Remove Fedora 22 as supported platform. Fedora 22 is EOL, and no longer receives updates [#27432](https://github.com/docker/docker/pull/27432) -- Remove Fedora 23 as supported platform. Fedora 23 is EOL, and no longer receives updates [#29455](https://github.com/docker/docker/pull/29455) -- Deprecate the `repo:shortid` syntax on `docker pull` [#27207](https://github.com/docker/docker/pull/27207) -- Deprecate backing filesystem without `d_type` for overlay and overlay2 storage drivers [#27433](https://github.com/docker/docker/pull/27433) -- Deprecate `MAINTAINER` in Dockerfile [#25466](https://github.com/docker/docker/pull/25466) -- Deprecate `filter` param for endpoint `/images/json` [#27872](https://github.com/docker/docker/pull/27872) -- Deprecate setting duplicate engine labels [#24533](https://github.com/docker/docker/pull/24533) -- Deprecate "top-level" network information in `NetworkSettings` [#28437](https://github.com/docker/docker/pull/28437) - -## 1.12.6 (2017-01-10) - -**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm -based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When -upgrading from an older version of docker, the upgrade process may not -automatically install the updated version of the unit file, or fail to start -the docker service if; - -- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or -- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive - -Starting the docker service will produce an error: - - Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. - -or - - no sockets found via socket activation: make sure the service was started by systemd. - -To resolve this: - -- Backup the current version of the unit file, and replace the file with the - [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) -- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present -- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). - -After making those changes, run `sudo systemctl daemon-reload`, and `sudo -systemctl restart docker` to reload changes and (re)start the docker daemon. - -**NOTE**: Docker 1.12.5 will correctly validate that either an IPv6 subnet is provided or -that the IPAM driver can provide one when you specify the `--ipv6` option. - -If you are currently using the `--ipv6` option _without_ specifying the -`--fixed-cidr-v6` option, the Docker daemon will refuse to start with the -following message: - -```none -Error starting daemon: Error initializing network controller: Error creating - default "bridge" network: failed to parse pool request - for address space "LocalDefault" pool " subpool ": - could not find an available, non-overlapping IPv6 address - pool among the defaults to assign to the network -``` - -To resolve this error, either remove the `--ipv6` flag (to preserve the same -behavior as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the -value of the `--fixed-cidr-v6` flag. 
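For illustration, a daemon launched with `--ipv6` can be given a subnet explicitly; this is only a sketch, and the subnet shown is a documentation prefix that must be replaced with a range valid for your environment:

```bash
# Illustrative only: pair --ipv6 with an explicit IPv6 subnet so the daemon
# can allocate addresses for the default "bridge" network.
dockerd --ipv6 --fixed-cidr-v6 "2001:db8:1::/64"
```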
- -In a similar way, if you specify the `--ipv6` flag when creating a network -with the default IPAM driver, without providing an IPv6 `--subnet`, network -creation will fail with the following message: - -```none -Error response from daemon: failed to parse pool request for address space - "LocalDefault" pool "" subpool "": could not find an - available, non-overlapping IPv6 address pool among - the defaults to assign to the network -``` - -To resolve this, either remove the `--ipv6` flag (to preserve the same behavior -as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the value of the -`--subnet` flag. - -The network network creation will instead succeed if you use an external IPAM driver -which supports automatic allocation of IPv6 subnets. - -### Runtime - -- Fix runC privilege escalation (CVE-2016-9962) - -## 1.12.5 (2016-12-15) - -**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm -based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When -upgrading from an older version of docker, the upgrade process may not -automatically install the updated version of the unit file, or fail to start -the docker service if; - -- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or -- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive - -Starting the docker service will produce an error: - - Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. - -or - - no sockets found via socket activation: make sure the service was started by systemd. - -To resolve this: - -- Backup the current version of the unit file, and replace the file with the - [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) -- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present -- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). - -After making those changes, run `sudo systemctl daemon-reload`, and `sudo -systemctl restart docker` to reload changes and (re)start the docker daemon. - -**NOTE**: Docker 1.12.5 will correctly validate that either an IPv6 subnet is provided or -that the IPAM driver can provide one when you specify the `--ipv6` option. - -If you are currently using the `--ipv6` option _without_ specifying the -`--fixed-cidr-v6` option, the Docker daemon will refuse to start with the -following message: - -```none -Error starting daemon: Error initializing network controller: Error creating - default "bridge" network: failed to parse pool request - for address space "LocalDefault" pool " subpool ": - could not find an available, non-overlapping IPv6 address - pool among the defaults to assign to the network -``` - -To resolve this error, either remove the `--ipv6` flag (to preserve the same -behavior as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the -value of the `--fixed-cidr-v6` flag. 
- -In a similar way, if you specify the `--ipv6` flag when creating a network -with the default IPAM driver, without providing an IPv6 `--subnet`, network -creation will fail with the following message: - -```none -Error response from daemon: failed to parse pool request for address space - "LocalDefault" pool "" subpool "": could not find an - available, non-overlapping IPv6 address pool among - the defaults to assign to the network -``` - -To resolve this, either remove the `--ipv6` flag (to preserve the same behavior -as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the value of the -`--subnet` flag. - -The network network creation will instead succeed if you use an external IPAM driver -which supports automatic allocation of IPv6 subnets. - -### Runtime - -- Fix race on sending stdin close event [#29424](https://github.com/docker/docker/pull/29424) - -### Networking - -- Fix panic in docker network ls when a network was created with `--ipv6` and no ipv6 `--subnet` in older docker versions [#29416](https://github.com/docker/docker/pull/29416) - -### Contrib - -- Fix compilation on Darwin [#29370](https://github.com/docker/docker/pull/29370) - -## 1.12.4 (2016-12-12) - -**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm -based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When -upgrading from an older version of docker, the upgrade process may not -automatically install the updated version of the unit file, or fail to start -the docker service if; - -- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or -- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive - -Starting the docker service will produce an error: - - Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. - -or - - no sockets found via socket activation: make sure the service was started by systemd. - -To resolve this: - -- Backup the current version of the unit file, and replace the file with the - [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) -- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present -- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). - -After making those changes, run `sudo systemctl daemon-reload`, and `sudo -systemctl restart docker` to reload changes and (re)start the docker daemon. 
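A rough sketch of checking for and then applying those changes (the drop-in directory shown, `/etc/systemd/system/docker.service.d/`, is the usual location but is an assumption about your setup):

```bash
# Illustrative only: locate the directives the note asks you to remove.
grep -n 'Requires=docker.socket' /usr/lib/systemd/system/docker.service
grep -rn -- '-H fd://' /usr/lib/systemd/system/docker.service /etc/systemd/system/docker.service.d/ 2>/dev/null

# After editing the unit file and any drop-ins, reload systemd and restart docker.
sudo systemctl daemon-reload
sudo systemctl restart docker
```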
- - -### Runtime - -- Fix issue where volume metadata was not removed [#29083](https://github.com/docker/docker/pull/29083) -- Asynchronously close streams to prevent holding container lock [#29050](https://github.com/docker/docker/pull/29050) -- Fix selinux labels for newly created container volumes [#29050](https://github.com/docker/docker/pull/29050) -- Remove hostname validation [#28990](https://github.com/docker/docker/pull/28990) -- Fix deadlocks caused by IO races [#29095](https://github.com/docker/docker/pull/29095) [#29141](https://github.com/docker/docker/pull/29141) -- Return an empty stats if the container is restarting [#29150](https://github.com/docker/docker/pull/29150) -- Fix volume store locking [#29151](https://github.com/docker/docker/pull/29151) -- Ensure consistent status code in API [#29150](https://github.com/docker/docker/pull/29150) -- Fix incorrect opaque directory permission in overlay2 [#29093](https://github.com/docker/docker/pull/29093) -- Detect plugin content and error out on `docker pull` [#29297](https://github.com/docker/docker/pull/29297) - -### Swarm Mode - -* Update Swarmkit [#29047](https://github.com/docker/docker/pull/29047) - - orchestrator/global: Fix deadlock on updates [docker/swarmkit#1760](https://github.com/docker/swarmkit/pull/1760) - - on leader switchover preserve the vxlan id for existing networks [docker/swarmkit#1773](https://github.com/docker/swarmkit/pull/1773) -- Refuse swarm spec not named "default" [#29152](https://github.com/docker/docker/pull/29152) - -### Networking - -* Update libnetwork [#29004](https://github.com/docker/docker/pull/29004) [#29146](https://github.com/docker/docker/pull/29146) - - Fix panic in embedded DNS [docker/libnetwork#1561](https://github.com/docker/libnetwork/pull/1561) - - Fix unmarhalling panic when passing --link-local-ip on global scope network [docker/libnetwork#1564](https://github.com/docker/libnetwork/pull/1564) - - Fix panic when network plugin returns nil StaticRoutes [docker/libnetwork#1563](https://github.com/docker/libnetwork/pull/1563) - - Fix panic in osl.(*networkNamespace).DeleteNeighbor [docker/libnetwork#1555](https://github.com/docker/libnetwork/pull/1555) - - Fix panic in swarm networking concurrent map read/write [docker/libnetwork#1570](https://github.com/docker/libnetwork/pull/1570) - * Allow encrypted networks when running docker inside a container [docker/libnetwork#1502](https://github.com/docker/libnetwork/pull/1502) - - Do not block autoallocation of IPv6 pool [docker/libnetwork#1538](https://github.com/docker/libnetwork/pull/1538) - - Set timeout for netlink calls [docker/libnetwork#1557](https://github.com/docker/libnetwork/pull/1557) - - Increase networking local store timeout to one minute [docker/libkv#140](https://github.com/docker/libkv/pull/140) - - Fix a panic in libnetwork.(*sandbox).execFunc [docker/libnetwork#1556](https://github.com/docker/libnetwork/pull/1556) - - Honor icc=false for internal networks [docker/libnetwork#1525](https://github.com/docker/libnetwork/pull/1525) - -### Logging - -* Update syslog log driver [#29150](https://github.com/docker/docker/pull/29150) - -### Contrib - -- Run "dnf upgrade" before installing in fedora [#29150](https://github.com/docker/docker/pull/29150) -- Add build-date back to RPM packages [#29150](https://github.com/docker/docker/pull/29150) -- deb package filename changed to include distro to distinguish between distro code names [#27829](https://github.com/docker/docker/pull/27829) - -## 1.12.3 (2016-10-26) - 
-**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm -based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When -upgrading from an older version of docker, the upgrade process may not -automatically install the updated version of the unit file, or fail to start -the docker service if; - -- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or -- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive - -Starting the docker service will produce an error: - - Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. - -or - - no sockets found via socket activation: make sure the service was started by systemd. - -To resolve this: - -- Backup the current version of the unit file, and replace the file with the - [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) -- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present -- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). - -After making those changes, run `sudo systemctl daemon-reload`, and `sudo -systemctl restart docker` to reload changes and (re)start the docker daemon. - - -### Runtime - -- Fix ambient capability usage in containers (CVE-2016-8867) [#27610](https://github.com/docker/docker/pull/27610) -- Prevent a deadlock in libcontainerd for Windows [#27136](https://github.com/docker/docker/pull/27136) -- Fix error reporting in CopyFileWithTar [#27075](https://github.com/docker/docker/pull/27075) -* Reset health status to starting when a container is restarted [#27387](https://github.com/docker/docker/pull/27387) -* Properly handle shared mount propagation in storage directory [#27609](https://github.com/docker/docker/pull/27609) -- Fix docker exec [#27610](https://github.com/docker/docker/pull/27610) -- Fix backward compatibility with containerd’s events log [#27693](https://github.com/docker/docker/pull/27693) - -### Swarm Mode - -- Fix conversion of restart-policy [#27062](https://github.com/docker/docker/pull/27062) -* Update Swarmkit [#27554](https://github.com/docker/docker/pull/27554) - * Avoid restarting a task that has already been restarted [docker/swarmkit#1305](https://github.com/docker/swarmkit/pull/1305) - * Allow duplicate published ports when they use different protocols [docker/swarmkit#1632](https://github.com/docker/swarmkit/pull/1632) - * Allow multiple randomly assigned published ports on service [docker/swarmkit#1657](https://github.com/docker/swarmkit/pull/1657) - - Fix panic when allocations happen at init time [docker/swarmkit#1651](https://github.com/docker/swarmkit/pull/1651) - -### Networking - -* Update libnetwork [#27559](https://github.com/docker/docker/pull/27559) - - Fix race in serializing sandbox to string [docker/libnetwork#1495](https://github.com/docker/libnetwork/pull/1495) - - Fix race during deletion [docker/libnetwork#1503](https://github.com/docker/libnetwork/pull/1503) - * Reset endpoint port info on connectivity revoke in bridge driver [docker/libnetwork#1504](https://github.com/docker/libnetwork/pull/1504) - - Fix a deadlock in networking code [docker/libnetwork#1507](https://github.com/docker/libnetwork/pull/1507) - - Fix a race in load balancer state [docker/libnetwork#1512](https://github.com/docker/libnetwork/pull/1512) - -### Logging - -* 
Update fluent-logger-golang to v1.2.1 [#27474](https://github.com/docker/docker/pull/27474) - -### Contrib - -* Update buildtags for armhf ubuntu-trusty [#27327](https://github.com/docker/docker/pull/27327) -* Add AppArmor to runc buildtags for armhf [#27421](https://github.com/docker/docker/pull/27421) - -## 1.12.2 (2016-10-11) - -**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm -based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When -upgrading from an older version of docker, the upgrade process may not -automatically install the updated version of the unit file, or fail to start -the docker service if; - -- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or -- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive - -Starting the docker service will produce an error: - - Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. - -or - - no sockets found via socket activation: make sure the service was started by systemd. - -To resolve this: - -- Backup the current version of the unit file, and replace the file with the - [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) -- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present -- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). - -After making those changes, run `sudo systemctl daemon-reload`, and `sudo -systemctl restart docker` to reload changes and (re)start the docker daemon. - - -### Runtime - -- Fix a panic due to a race condition filtering `docker ps` [#26049](https://github.com/docker/docker/pull/26049) -* Implement retry logic to prevent "Unable to remove filesystem" errors when using the aufs storage driver [#26536](https://github.com/docker/docker/pull/26536) -* Prevent devicemapper from removing device symlinks if `dm.use_deferred_removal` is enabled [#24740](https://github.com/docker/docker/pull/24740) -- Fix an issue where the CLI did not return correct exit codes if a command was run with invalid options [#26777](https://github.com/docker/docker/pull/26777) -- Fix a panic due to a bug in stdout / stderr processing in health checks [#26507](https://github.com/docker/docker/pull/26507) -- Fix exec's children handling [#26874](https://github.com/docker/docker/pull/26874) -- Fix exec form of HEALTHCHECK CMD [#26208](https://github.com/docker/docker/pull/26208) - -### Networking - -- Fix a daemon start panic on armv5 [#24315](https://github.com/docker/docker/issues/24315) -* Vendor libnetwork [#26879](https://github.com/docker/docker/pull/26879) [#26953](https://github.com/docker/docker/pull/26953) - * Avoid returning early on agent join failures [docker/libnetwork#1473](https://github.com/docker/libnetwork/pull/1473) - - Fix service published port cleanup issues [docker/libetwork#1432](https://github.com/docker/libnetwork/pull/1432) [docker/libnetwork#1433](https://github.com/docker/libnetwork/pull/1433) - * Recover properly from transient gossip failures [docker/libnetwork#1446](https://github.com/docker/libnetwork/pull/1446) - * Disambiguate node names known to gossip cluster to avoid node name collision [docker/libnetwork#1451](https://github.com/docker/libnetwork/pull/1451) - * Honor user provided listen address for gossip 
[docker/libnetwork#1460](https://github.com/docker/libnetwork/pull/1460) - * Allow reachability via published port across services on the same host [docker/libnetwork#1398](https://github.com/docker/libnetwork/pull/1398) - * Change the ingress sandbox name from random id to just `ingress_sbox` [docker/libnetwork#1449](https://github.com/docker/libnetwork/pull/1449) - - Disable service discovery in ingress network [docker/libnetwork#1489](https://github.com/docker/libnetwork/pull/1489) - -### Swarm Mode - -* Fix remote detection of a node's address when it joins the cluster [#26211](https://github.com/docker/docker/pull/26211) -* Vendor SwarmKit [#26765](https://github.com/docker/docker/pull/26765) - * Bounce session after failed status update [docker/swarmkit#1539](https://github.com/docker/swarmkit/pull/1539) - - Fix possible raft deadlocks [docker/swarmkit#1537](https://github.com/docker/swarmkit/pull/1537) - - Fix panic and endpoint leak when a service is updated with no endpoints [docker/swarmkit#1481](https://github.com/docker/swarmkit/pull/1481) - * Produce an error if the same port is published twice on `service create` or `service update` [docker/swarmkit#1495](https://github.com/docker/swarmkit/pull/1495) - - Fix an issue where changes to a service were not detected, resulting in the service not being updated [docker/swarmkit#1497](https://github.com/docker/swarmkit/pull/1497) - - Do not allow service creation on ingress network [docker/swarmkit#1600](https://github.com/docker/swarmkit/pull/1600) - -### Contrib - -* Update the debian sysv-init script to use `dockerd` instead of `docker daemon` [#25869](https://github.com/docker/docker/pull/25869) -* Improve stability when running the docker client on MacOS Sierra [#26875](https://github.com/docker/docker/pull/26875) -- Fix installation on debian stretch [#27184](https://github.com/docker/docker/pull/27184) - -### Windows - -- Fix an issue where arrow-navigation did not work when running the docker client in ConEmu [#25578](https://github.com/docker/docker/pull/25578) - -## 1.12.1 (2016-08-18) - -**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm -based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When -upgrading from an older version of docker, the upgrade process may not -automatically install the updated version of the unit file, or fail to start -the docker service if; - -- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or -- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive - -Starting the docker service will produce an error: - - Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. - -or - - no sockets found via socket activation: make sure the service was started by systemd. - -To resolve this: - -- Backup the current version of the unit file, and replace the file with the - [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) -- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present -- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). - -After making those changes, run `sudo systemctl daemon-reload`, and `sudo -systemctl restart docker` to reload changes and (re)start the docker daemon. 
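As a hedged sketch of the backup-and-replace step above (the backup filename is arbitrary; the URL is the one given in the note):

```bash
# Illustrative only: keep a copy of the current unit file, then fetch the
# version that ships with docker 1.12 over it.
sudo cp /usr/lib/systemd/system/docker.service /usr/lib/systemd/system/docker.service.bak
sudo curl -fsSL -o /usr/lib/systemd/system/docker.service \
  https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm
```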
- - -### Client - -* Add `Joined at` information in `node inspect --pretty` [#25512](https://github.com/docker/docker/pull/25512) -- Fix a crash on `service inspect` [#25454](https://github.com/docker/docker/pull/25454) -- Fix issue preventing `service update --env-add` to work as intended [#25427](https://github.com/docker/docker/pull/25427) -- Fix issue preventing `service update --publish-add` to work as intended [#25428](https://github.com/docker/docker/pull/25428) -- Remove `service update --network-add` and `service update --network-rm` flags - because this feature is not yet implemented in 1.12, but was inadvertently added - to the client in 1.12.0 [#25646](https://github.com/docker/docker/pull/25646) - -### Contrib - -+ Official ARM installation for Debian Jessie, Ubuntu Trusty, and Raspbian Jessie [#24815](https://github.com/docker/docker/pull/24815) [#25591](https://github.com/docker/docker/pull/25637) -- Add selinux policy per distro/version, fixing issue preventing successful installation on Fedora 24, and Oracle Linux [#25334](https://github.com/docker/docker/pull/25334) [#25593](https://github.com/docker/docker/pull/25593) - -### Networking - -- Fix issue that prevented containers to be accessed by hostname with Docker overlay driver in Swarm Mode [#25603](https://github.com/docker/docker/pull/25603) [#25648](https://github.com/docker/docker/pull/25648) -- Fix random network issues on service with published port [#25603](https://github.com/docker/docker/pull/25603) -- Fix unreliable inter-service communication after scaling down and up [#25603](https://github.com/docker/docker/pull/25603) -- Fix issue where removing all tasks on a node and adding them back breaks connectivity with other services [#25603](https://github.com/docker/docker/pull/25603) -- Fix issue where a task that fails to start results in a race, causing a `network xxx not found` error that masks the actual error [#25550](https://github.com/docker/docker/pull/25550) -- Relax validation of SRV records for external services that use SRV records not formatted according to RFC 2782 [#25739](https://github.com/docker/docker/pull/25739) - -### Plugins (experimental) - -* Make daemon events listen for plugin lifecycle events [#24760](https://github.com/docker/docker/pull/24760) -* Check for plugin state before enabling plugin [#25033](https://github.com/docker/docker/pull/25033) -- Remove plugin root from filesystem on `plugin rm` [#25187](https://github.com/docker/docker/pull/25187) -- Prevent deadlock when more than one plugin is installed [#25384](https://github.com/docker/docker/pull/25384) - -### Runtime - -* Mask join tokens in daemon logs [#25346](https://github.com/docker/docker/pull/25346) -- Fix `docker ps --filter` causing the results to no longer be sorted by creation time [#25387](https://github.com/docker/docker/pull/25387) -- Fix various crashes [#25053](https://github.com/docker/docker/pull/25053) - -### Security - -* Add `/proc/timer_list` to the masked paths list to prevent information leak from the host [#25630](https://github.com/docker/docker/pull/25630) -* Allow systemd to run with only `--cap-add SYS_ADMIN` rather than having to also add `--cap-add DAC_READ_SEARCH` or disabling seccomp filtering [#25567](https://github.com/docker/docker/pull/25567) - -### Swarm - -- Fix an issue where the swarm can get stuck electing a new leader after quorum is lost [#25055](https://github.com/docker/docker/issues/25055) -- Fix unwanted rescheduling of containers after a leader failover 
[#25017](https://github.com/docker/docker/issues/25017) -- Change swarm root CA key to P256 curve [swarmkit#1376](https://github.com/docker/swarmkit/pull/1376) -- Allow forced removal of a node from a swarm [#25159](https://github.com/docker/docker/pull/25159) -- Fix connection leak when a node leaves a swarm [swarmkit/#1277](https://github.com/docker/swarmkit/pull/1277) -- Backdate swarm certificates by one hour to tolerate more clock skew [swarmkit/#1243](https://github.com/docker/swarmkit/pull/1243) -- Avoid high CPU use with many unschedulable tasks [swarmkit/#1287](https://github.com/docker/swarmkit/pull/1287) -- Fix issue with global tasks not starting up [swarmkit/#1295](https://github.com/docker/swarmkit/pull/1295) -- Garbage collect raft logs [swarmkit/#1327](https://github.com/docker/swarmkit/pull/1327) - -### Volume - -- Persist local volume options after a daemon restart [#25316](https://github.com/docker/docker/pull/25316) -- Fix an issue where the mount ID was not returned on volume unmount [#25333](https://github.com/docker/docker/pull/25333) -- Fix an issue where a volume mount could inadvertently create a bind mount [#25309](https://github.com/docker/docker/pull/25309) -- `docker service create --mount type=bind,...` now correctly validates if the source path exists, instead of creating it [#25494](https://github.com/docker/docker/pull/25494) - -## 1.12.0 (2016-07-28) - - -**IMPORTANT**: Docker 1.12.0 ships with an updated systemd unit file for rpm -based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When -upgrading from an older version of docker, the upgrade process may not -automatically install the updated version of the unit file, or fail to start -the docker service if; - -- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or -- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive - -Starting the docker service will produce an error: - - Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. - -or - - no sockets found via socket activation: make sure the service was started by systemd. - -To resolve this: - -- Backup the current version of the unit file, and replace the file with the - [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) -- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present -- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). - -After making those changes, run `sudo systemctl daemon-reload`, and `sudo -systemctl restart docker` to reload changes and (re)start the docker daemon. - -**IMPORTANT**: With Docker 1.12, a Linux docker installation now has two -additional binaries; `dockerd`, and `docker-proxy`. If you have scripts for -installing docker, please make sure to update them accordingly. 
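A quick, illustrative check that an upgraded 1.12 install has both new binaries on the `PATH`:

```bash
# Sketch only: both commands should print a path after upgrading to 1.12.
command -v dockerd
command -v docker-proxy
```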
- -### Builder - -+ New `HEALTHCHECK` Dockerfile instruction to support user-defined healthchecks [#23218](https://github.com/docker/docker/pull/23218) -+ New `SHELL` Dockerfile instruction to specify the default shell when using the shell form for commands in a Dockerfile [#22489](https://github.com/docker/docker/pull/22489) -+ Add `#escape=` Dockerfile directive to support platform-specific parsing of file paths in Dockerfile [#22268](https://github.com/docker/docker/pull/22268) -+ Add support for comments in `.dockerignore` [#23111](https://github.com/docker/docker/pull/23111) -* Support for UTF-8 in Dockerfiles [#23372](https://github.com/docker/docker/pull/23372) -* Skip UTF-8 BOM bytes from `Dockerfile` and `.dockerignore` if exist [#23234](https://github.com/docker/docker/pull/23234) -* Windows: support for `ARG` to match Linux [#22508](https://github.com/docker/docker/pull/22508) -- Fix error message when building using a daemon with the bridge network disabled [#22932](https://github.com/docker/docker/pull/22932) - -### Contrib - -* Enable seccomp for Centos 7 and Oracle Linux 7 [#22344](https://github.com/docker/docker/pull/22344) -- Remove MountFlags in systemd unit to allow shared mount propagation [#22806](https://github.com/docker/docker/pull/22806) - -### Distribution - -+ Add `--max-concurrent-downloads` and `--max-concurrent-uploads` daemon flags useful for situations where network connections don't support multiple downloads/uploads [#22445](https://github.com/docker/docker/pull/22445) -* Registry operations now honor the `ALL_PROXY` environment variable [#22316](https://github.com/docker/docker/pull/22316) -* Provide more information to the user on `docker load` [#23377](https://github.com/docker/docker/pull/23377) -* Always save registry digest metadata about images pushed and pulled [#23996](https://github.com/docker/docker/pull/23996) - -### Logging - -+ Syslog logging driver now supports DGRAM sockets [#21613](https://github.com/docker/docker/pull/21613) -+ Add `--details` option to `docker logs` to also display log tags [#21889](https://github.com/docker/docker/pull/21889) -+ Enable syslog logger to have access to env and labels [#21724](https://github.com/docker/docker/pull/21724) -+ An additional syslog-format option `rfc5424micro` to allow microsecond resolution in syslog timestamp [#21844](https://github.com/docker/docker/pull/21844) -* Inherit the daemon log options when creating containers [#21153](https://github.com/docker/docker/pull/21153) -* Remove `docker/` prefix from log messages tag and replace it with `{{.DaemonName}}` so that users have the option of changing the prefix [#22384](https://github.com/docker/docker/pull/22384) - -### Networking - -+ Built-in Virtual-IP based internal and ingress load-balancing using IPVS [#23361](https://github.com/docker/docker/pull/23361) -+ Routing Mesh using ingress overlay network [#23361](https://github.com/docker/docker/pull/23361) -+ Secured multi-host overlay networking using encrypted control-plane and Data-plane [#23361](https://github.com/docker/docker/pull/23361) -+ MacVlan driver is out of experimental [#23524](https://github.com/docker/docker/pull/23524) -+ Add `driver` filter to `network ls` [#22319](https://github.com/docker/docker/pull/22319) -+ Adding `network` filter to `docker ps --filter` [#23300](https://github.com/docker/docker/pull/23300) -+ Add `--link-local-ip` flag to `create`, `run` and `network connect` to specify a container's link-local address 
[#23415](https://github.com/docker/docker/pull/23415) -+ Add network label filter support [#21495](https://github.com/docker/docker/pull/21495) -* Removed dependency on external KV-Store for Overlay networking in Swarm-Mode [#23361](https://github.com/docker/docker/pull/23361) -* Add container's short-id as default network alias [#21901](https://github.com/docker/docker/pull/21901) -* `run` options `--dns` and `--net=host` are no longer mutually exclusive [#22408](https://github.com/docker/docker/pull/22408) -- Fix DNS issue when renaming containers with generated names [#22716](https://github.com/docker/docker/pull/22716) -- Allow both `network inspect -f {{.Id}}` and `network inspect -f {{.ID}}` to address inconsistency with inspect output [#23226](https://github.com/docker/docker/pull/23226) - -### Plugins (experimental) - -+ New `plugin` command to manage plugins with `install`, `enable`, `disable`, `rm`, `inspect`, `set` subcommands [#23446](https://github.com/docker/docker/pull/23446) - -### Remote API (v1.24) & Client - -+ Split the binary into two: `docker` (client) and `dockerd` (daemon) [#20639](https://github.com/docker/docker/pull/20639) -+ Add `before` and `since` filters to `docker images --filter` [#22908](https://github.com/docker/docker/pull/22908) -+ Add `--limit` option to `docker search` [#23107](https://github.com/docker/docker/pull/23107) -+ Add `--filter` option to `docker search` [#22369](https://github.com/docker/docker/pull/22369) -+ Add security options to `docker info` output [#21172](https://github.com/docker/docker/pull/21172) [#23520](https://github.com/docker/docker/pull/23520) -+ Add insecure registries to `docker info` output [#20410](https://github.com/docker/docker/pull/20410) -+ Extend Docker authorization with TLS user information [#21556](https://github.com/docker/docker/pull/21556) -+ devicemapper: expose Minimum Thin Pool Free Space through `docker info` [#21945](https://github.com/docker/docker/pull/21945) -* API now returns a JSON object when an error occurs, making it more consistent [#22880](https://github.com/docker/docker/pull/22880) -- Prevent `docker run -i --restart` from hanging on exit [#22777](https://github.com/docker/docker/pull/22777) -- Fix API/CLI discrepancy on hostname validation [#21641](https://github.com/docker/docker/pull/21641) -- Fix discrepancy in the format of sizes in `stats` from HumanSize to BytesSize [#21773](https://github.com/docker/docker/pull/21773) -- authz: when request is denied return forbidden exit code (403) [#22448](https://github.com/docker/docker/pull/22448) -- Windows: fix tty-related displaying issues [#23878](https://github.com/docker/docker/pull/23878) - -### Runtime - -+ Split the userland proxy into a separate binary (`docker-proxy`) [#23312](https://github.com/docker/docker/pull/23312) -+ Add `--live-restore` daemon flag to keep containers running when daemon shuts down, and regain control on startup [#23213](https://github.com/docker/docker/pull/23213) -+ Ability to add OCI-compatible runtimes (via `--add-runtime` daemon flag) and select one with `--runtime` on `create` and `run` [#22983](https://github.com/docker/docker/pull/22983) -+ New `overlay2` graphdriver for Linux 4.0+ with multiple lower directory support [#22126](https://github.com/docker/docker/pull/22126) -+ New load/save image events [#22137](https://github.com/docker/docker/pull/22137) -+ Add support for reloading daemon configuration through systemd [#22446](https://github.com/docker/docker/pull/22446) -+ Add disk quota support for
btrfs [#19651](https://github.com/docker/docker/pull/19651) -+ Add disk quota support for zfs [#21946](https://github.com/docker/docker/pull/21946) -+ Add support for `docker run --pid=container:` [#22481](https://github.com/docker/docker/pull/22481) -+ Align default seccomp profile with selected capabilities [#22554](https://github.com/docker/docker/pull/22554) -+ Add a `daemon reload` event when the daemon reloads its configuration [#22590](https://github.com/docker/docker/pull/22590) -+ Add `trace` capability in the pprof profiler to show execution traces in binary form [#22715](https://github.com/docker/docker/pull/22715) -+ Add a `detach` event [#22898](https://github.com/docker/docker/pull/22898) -+ Add support for setting sysctls with `--sysctl` [#19265](https://github.com/docker/docker/pull/19265) -+ Add `--storage-opt` flag to `create` and `run` allowing to set `size` on devicemapper [#19367](https://github.com/docker/docker/pull/19367) -+ Add `--oom-score-adjust` daemon flag with a default value of `-500` making the daemon less likely to be killed before containers [#24516](https://github.com/docker/docker/pull/24516) -* Undeprecate the `-c` short alias of `--cpu-shares` on `run`, `build`, `create`, `update` [#22621](https://github.com/docker/docker/pull/22621) -* Prevent from using aufs and overlay graphdrivers on an eCryptfs mount [#23121](https://github.com/docker/docker/pull/23121) -- Fix issues with tmpfs mount ordering [#22329](https://github.com/docker/docker/pull/22329) -- Created containers are no longer listed on `docker ps -a -f exited=0` [#21947](https://github.com/docker/docker/pull/21947) -- Fix an issue where containers are stuck in a "Removal In Progress" state [#22423](https://github.com/docker/docker/pull/22423) -- Fix bug that was returning an HTTP 500 instead of a 400 when not specifying a command on run/create [#22762](https://github.com/docker/docker/pull/22762) -- Fix bug with `--detach-keys` whereby input matching a prefix of the detach key was not preserved [#22943](https://github.com/docker/docker/pull/22943) -- SELinux labeling is now disabled when using `--privileged` mode [#22993](https://github.com/docker/docker/pull/22993) -- If volume-mounted into a container, `/etc/hosts`, `/etc/resolv.conf`, `/etc/hostname` are no longer SELinux-relabeled [#22993](https://github.com/docker/docker/pull/22993) -- Fix inconsistency in `--tmpfs` behavior regarding mount options [#22438](https://github.com/docker/docker/pull/22438) -- Fix an issue where daemon hangs at startup [#23148](https://github.com/docker/docker/pull/23148) -- Ignore SIGPIPE events to prevent journald restarts to crash docker in some cases [#22460](https://github.com/docker/docker/pull/22460) -- Containers are not removed from stats list on error [#20835](https://github.com/docker/docker/pull/20835) -- Fix `on-failure` restart policy when daemon restarts [#20853](https://github.com/docker/docker/pull/20853) -- Fix an issue with `stats` when a container is using another container's network [#21904](https://github.com/docker/docker/pull/21904) - -### Swarm Mode - -+ New `swarm` command to manage swarms with `init`, `join`, `join-token`, `leave`, `update` subcommands [#23361](https://github.com/docker/docker/pull/23361) [#24823](https://github.com/docker/docker/pull/24823) -+ New `service` command to manage swarm-wide services with `create`, `inspect`, `update`, `rm`, `ps` subcommands [#23361](https://github.com/docker/docker/pull/23361) [#25140](https://github.com/docker/docker/pull/25140) -+ New 
`node` command to manage nodes with `accept`, `promote`, `demote`, `inspect`, `update`, `ps`, `ls` and `rm` subcommands [#23361](https://github.com/docker/docker/pull/23361) [#25140](https://github.com/docker/docker/pull/25140) -+ (experimental) New `stack` and `deploy` commands to manage and deploy multi-service applications [#23522](https://github.com/docker/docker/pull/23522) [#25140](https://github.com/docker/docker/pull/25140) - -### Volume - -+ Add support for local and global volume scopes (analogous to network scopes) [#22077](https://github.com/docker/docker/pull/22077) -+ Allow volume drivers to provide a `Status` field [#21006](https://github.com/docker/docker/pull/21006) -+ Add name/driver filter support for volume [#21361](https://github.com/docker/docker/pull/21361) -* Mount/Unmount operations now receives an opaque ID to allow volume drivers to differentiate between two callers [#21015](https://github.com/docker/docker/pull/21015) -- Fix issue preventing to remove a volume in a corner case [#22103](https://github.com/docker/docker/pull/22103) -- Windows: Enable auto-creation of host-path to match Linux [#22094](https://github.com/docker/docker/pull/22094) - - -### Deprecation - -* Environment variables `DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and `DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` have been renamed - to `DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE` and `DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE` respectively [#22574](https://github.com/docker/docker/pull/22574) -* Remove deprecated `syslog-tag`, `gelf-tag`, `fluentd-tag` log option in favor of the more generic `tag` one [#22620](https://github.com/docker/docker/pull/22620) -* Remove deprecated feature of passing HostConfig at API container start [#22570](https://github.com/docker/docker/pull/22570) -* Remove deprecated `-f`/`--force` flag on docker tag [#23090](https://github.com/docker/docker/pull/23090) -* Remove deprecated `/containers//copy` endpoint [#22149](https://github.com/docker/docker/pull/22149) -* Remove deprecated `docker ps` flags `--since` and `--before` [#22138](https://github.com/docker/docker/pull/22138) -* Deprecate the old 3-args form of `docker import` [#23273](https://github.com/docker/docker/pull/23273) - -## 1.11.2 (2016-05-31) - -### Networking - -- Fix a stale endpoint issue on overlay networks during ungraceful restart ([#23015](https://github.com/docker/docker/pull/23015)) -- Fix an issue where the wrong port could be reported by `docker inspect/ps/port` ([#22997](https://github.com/docker/docker/pull/22997)) - -### Runtime - -- Fix a potential panic when running `docker build` ([#23032](https://github.com/docker/docker/pull/23032)) -- Fix interpretation of `--user` parameter ([#22998](https://github.com/docker/docker/pull/22998)) -- Fix a bug preventing container statistics to be correctly reported ([#22955](https://github.com/docker/docker/pull/22955)) -- Fix an issue preventing container to be restarted after daemon restart ([#22947](https://github.com/docker/docker/pull/22947)) -- Fix issues when running 32 bit binaries on Ubuntu 16.04 ([#22922](https://github.com/docker/docker/pull/22922)) -- Fix a possible deadlock on image deletion and container attach ([#22918](https://github.com/docker/docker/pull/22918)) -- Fix an issue where containers fail to start after a daemon restart if they depend on a containerized cluster store ([#22561](https://github.com/docker/docker/pull/22561)) -- Fix an issue causing `docker ps` to hang on CentOS when using devicemapper 
([#22168](https://github.com/docker/docker/pull/22168), [#23067](https://github.com/docker/docker/pull/23067)) -- Fix a bug preventing `docker exec` into a container when using devicemapper ([#22168](https://github.com/docker/docker/pull/22168), [#23067](https://github.com/docker/docker/pull/23067)) - - -## 1.11.1 (2016-04-26) - -### Distribution - -- Fix schema2 manifest media type to be of type `application/vnd.docker.container.image.v1+json` ([#21949](https://github.com/docker/docker/pull/21949)) - -### Documentation - -+ Add missing API documentation for changes introduced with 1.11.0 ([#22048](https://github.com/docker/docker/pull/22048)) - -### Builder - -* Append labels passed to `docker build` as arguments as an implicit `LABEL` command at the end of the processed `Dockerfile` ([#22184](https://github.com/docker/docker/pull/22184)) - -### Networking - -- Fix a panic that would occur when forwarding a DNS query ([#22261](https://github.com/docker/docker/pull/22261)) -- Fix an issue where OS threads could end up within an incorrect network namespace when using user defined networks ([#22261](https://github.com/docker/docker/pull/22261)) - -### Runtime - -- Fix a bug preventing the labels configuration from being reloaded via the config file ([#22299](https://github.com/docker/docker/pull/22299)) -- Fix a regression where a container mounting `/var/run` would prevent other containers from being removed ([#22256](https://github.com/docker/docker/pull/22256)) -- Fix an issue where it would be impossible to update both `memory-swap` and `memory` values together ([#22255](https://github.com/docker/docker/pull/22255)) -- Fix a regression from 1.11.0 where the `/auth` endpoint would not initialize `serveraddress` if it is not provided ([#22254](https://github.com/docker/docker/pull/22254)) -- Add missing cleanup of container temporary files when cancelling a scheduled restart ([#22237](https://github.com/docker/docker/pull/22237)) -- Remove scary error message when no restart policy is specified ([#21993](https://github.com/docker/docker/pull/21993)) -- Fix a panic that would occur when the plugins were activated via the json spec ([#22191](https://github.com/docker/docker/pull/22191)) -- Fix restart backoff logic to correctly reset delay if container ran for at least 10 seconds ([#22125](https://github.com/docker/docker/pull/22125)) -- Remove error message when a container restart gets cancelled ([#22123](https://github.com/docker/docker/pull/22123)) -- Fix an issue where `docker` would not correctly clean up after `docker exec` ([#22121](https://github.com/docker/docker/pull/22121)) -- Fix a panic that could occur when serving concurrent `docker stats` commands ([#22120](https://github.com/docker/docker/pull/22120)) -- Revert deprecation of non-existent host directories auto-creation ([#22065](https://github.com/docker/docker/pull/22065)) -- Hide misleading rpc error on daemon shutdown ([#22058](https://github.com/docker/docker/pull/22058)) - -## 1.11.0 (2016-04-13) - -**IMPORTANT**: With Docker 1.11, a Linux docker installation is now made of 4 binaries (`docker`, [`docker-containerd`](https://github.com/docker/containerd), [`docker-containerd-shim`](https://github.com/docker/containerd) and [`docker-runc`](https://github.com/opencontainers/runc)). If you have scripts relying on docker being a single static binary, please make sure to update them. Interaction with the daemon otherwise stays the same, and the usage of the other binaries should be transparent.
A Windows docker installation remains a single binary, `docker.exe`. - -### Builder - -- Fix a bug where Docker would not use the correct uid/gid when processing the `WORKDIR` command ([#21033](https://github.com/docker/docker/pull/21033)) -- Fix a bug where copy operations with userns would not use the proper uid/gid ([#20782](https://github.com/docker/docker/pull/20782), [#21162](https://github.com/docker/docker/pull/21162)) - -### Client - -* Usage of the `:` separator for security option has been deprecated. `=` should be used instead ([#21232](https://github.com/docker/docker/pull/21232)) -+ The client user agent is now passed to the registry on `pull`, `build`, `push`, `login` and `search` operations ([#21306](https://github.com/docker/docker/pull/21306), [#21373](https://github.com/docker/docker/pull/21373)) -* Allow setting the Domainname and Hostname separately through the API ([#20200](https://github.com/docker/docker/pull/20200)) -* Docker info will now warn users if it cannot detect the kernel version or the operating system ([#21128](https://github.com/docker/docker/pull/21128)) -- Fix an issue where `docker stats --no-stream` output could be all 0s ([#20803](https://github.com/docker/docker/pull/20803)) -- Fix a bug where some newly started containers would not appear in a running `docker stats` command ([#20792](https://github.com/docker/docker/pull/20792)) -* Post processing is no longer enabled for linux-cgo terminals ([#20587](https://github.com/docker/docker/pull/20587)) -- Values to `--hostname` are now refused if they do not comply with [RFC1123](https://tools.ietf.org/html/rfc1123) ([#20566](https://github.com/docker/docker/pull/20566)) -+ Docker learned how to use a SOCKS proxy ([#20366](https://github.com/docker/docker/pull/20366), [#18373](https://github.com/docker/docker/pull/18373)) -+ Docker now supports external credential stores ([#20107](https://github.com/docker/docker/pull/20107)) -* `docker ps` now supports displaying the list of volumes mounted inside a container ([#20017](https://github.com/docker/docker/pull/20017)) -* `docker info` now also reports Docker's root directory location ([#19986](https://github.com/docker/docker/pull/19986)) -- Docker now prohibits logging in with an empty username (spaces are trimmed) ([#19806](https://github.com/docker/docker/pull/19806)) -* Docker events attributes are now sorted by key ([#19761](https://github.com/docker/docker/pull/19761)) -* `docker ps` no longer shows exported port for stopped containers ([#19483](https://github.com/docker/docker/pull/19483)) -- Docker now cleans up after itself if a save/export command fails ([#17849](https://github.com/docker/docker/pull/17849)) -* Docker load learned how to display a progress bar ([#17329](https://github.com/docker/docker/pull/17329), [#20078](https://github.com/docker/docker/pull/20078)) - -### Distribution - -- Fix a panic that occurred when pulling an image with 0 layers ([#21222](https://github.com/docker/docker/pull/21222)) -- Fix a panic that could occur on error while pushing to a registry with a misconfigured token service ([#21212](https://github.com/docker/docker/pull/21212)) -+ All first-level delegation roles are now signed when doing a trusted push ([#21046](https://github.com/docker/docker/pull/21046)) -+ OAuth support for registries was added ([#20970](https://github.com/docker/docker/pull/20970)) -* `docker login` now handles tokens using the implementation found in [docker/distribution](https://github.com/docker/distribution)
([#20832](https://github.com/docker/docker/pull/20832)) -* `docker login` will no longer prompt for an email ([#20565](https://github.com/docker/docker/pull/20565)) -* Docker will now fall back to registry V1 if no basic auth credentials are available ([#20241](https://github.com/docker/docker/pull/20241)) -* Docker will now try to resume layer download where it left off after a network error/timeout ([#19840](https://github.com/docker/docker/pull/19840)) -- Fix generated manifest mediaType when pushing cross-repository ([#19509](https://github.com/docker/docker/pull/19509)) -- Fix docker requesting additional push credentials when pulling an image if Content Trust is enabled ([#20382](https://github.com/docker/docker/pull/20382)) - -### Logging - -- Fix a race in the journald log driver ([#21311](https://github.com/docker/docker/pull/21311)) -* Docker syslog driver now uses the RFC-5424 format when emitting logs ([#20121](https://github.com/docker/docker/pull/20121)) -* Docker GELF log driver now allows specifying the compression algorithm and level via the `gelf-compression-type` and `gelf-compression-level` options ([#19831](https://github.com/docker/docker/pull/19831)) -* Docker daemon learned to output uncolorized logs via the `--raw-logs` option ([#19794](https://github.com/docker/docker/pull/19794)) -+ Docker, on the Windows platform, now includes an ETW (Event Tracing for Windows) logging driver named `etwlogs` ([#19689](https://github.com/docker/docker/pull/19689)) -* Journald log driver learned how to handle tags ([#19564](https://github.com/docker/docker/pull/19564)) -+ The fluentd log driver learned the following options: `fluentd-address`, `fluentd-buffer-limit`, `fluentd-retry-wait`, `fluentd-max-retries` and `fluentd-async-connect` ([#19439](https://github.com/docker/docker/pull/19439)) -+ Docker learned to send logs to Google Cloud via the new `gcplogs` logging driver.
([#18766](https://github.com/docker/docker/pull/18766)) - - -### Misc - -+ When saving linked images together with `docker save` a subsequent `docker load` will correctly restore their parent/child relationship ([#21385](https://github.com/docker/docker/pull/21385)) -+ Support for building the Docker cli for OpenBSD was added ([#21325](https://github.com/docker/docker/pull/21325)) -+ Labels can now be applied at network, volume and image creation ([#21270](https://github.com/docker/docker/pull/21270)) -* The `dockremap` is now created as a system user ([#21266](https://github.com/docker/docker/pull/21266)) -- Fix a few response body leaks ([#21258](https://github.com/docker/docker/pull/21258)) -- Docker, when run as a service with systemd, will now properly manage its processes cgroups ([#20633](https://github.com/docker/docker/pull/20633)) -* `docker info` now reports the value of cgroup KernelMemory or emits a warning if it is not supported ([#20863](https://github.com/docker/docker/pull/20863)) -* `docker info` now also reports the cgroup driver in use ([#20388](https://github.com/docker/docker/pull/20388)) -* Docker completion is now available on PowerShell ([#19894](https://github.com/docker/docker/pull/19894)) -* `dockerinit` is no more ([#19490](https://github.com/docker/docker/pull/19490),[#19851](https://github.com/docker/docker/pull/19851)) -+ Support for building Docker on arm64 was added ([#19013](https://github.com/docker/docker/pull/19013)) -+ Experimental support for building docker.exe in a native Windows Docker installation ([#18348](https://github.com/docker/docker/pull/18348)) - -### Networking - -- Fix panic if a node is forcibly removed from the cluster ([#21671](https://github.com/docker/docker/pull/21671)) -- Fix "error creating vxlan interface" when starting a container in a Swarm cluster ([#21671](https://github.com/docker/docker/pull/21671)) -* `docker network inspect` will now report all endpoints whether they have an active container or not ([#21160](https://github.com/docker/docker/pull/21160)) -+ Experimental support for the MacVlan and IPVlan network drivers has been added ([#21122](https://github.com/docker/docker/pull/21122)) -* Output of `docker network ls` is now sorted by network name ([#20383](https://github.com/docker/docker/pull/20383)) -- Fix a bug where Docker would allow a network to be created with the reserved `default` name ([#19431](https://github.com/docker/docker/pull/19431)) -* `docker network inspect` returns whether a network is internal or not ([#19357](https://github.com/docker/docker/pull/19357)) -+ Control IPv6 via explicit option when creating a network (`docker network create --ipv6`). 
This shows up as a new `EnableIPv6` field in `docker network inspect` ([#17513](https://github.com/docker/docker/pull/17513))
-* Support for AAAA Records (aka IPv6 Service Discovery) in embedded DNS Server ([#21396](https://github.com/docker/docker/pull/21396))
-- Fix to not forward docker domain IPv6 queries to external servers ([#21396](https://github.com/docker/docker/pull/21396))
-* Multiple A/AAAA records from embedded DNS Server for DNS Round robin ([#21019](https://github.com/docker/docker/pull/21019))
-- Fix endpoint count inconsistency after an ungraceful daemon restart ([#21261](https://github.com/docker/docker/pull/21261))
-- Move the ownership of exposed ports and port-mapping options from Endpoint to Sandbox ([#21019](https://github.com/docker/docker/pull/21019))
-- Fixed a bug which prevented docker reload when the host is configured with ipv6.disable=1 ([#21019](https://github.com/docker/docker/pull/21019))
-- Added a built-in nil IPAM driver ([#21019](https://github.com/docker/docker/pull/21019))
-- Fixed a bug in iptables.Exists() logic ([#21019](https://github.com/docker/docker/pull/21019))
-- Fixed a Veth interface leak when using overlay networks ([#21019](https://github.com/docker/docker/pull/21019))
-- Fixed a bug which prevented docker reload after a network delete during shutdown ([#20214](https://github.com/docker/docker/pull/20214))
-- Make sure iptables chains are recreated on firewalld reload ([#20419](https://github.com/docker/docker/pull/20419))
-- Allow passing the global datastore during config reload ([#20419](https://github.com/docker/docker/pull/20419))
-- For anonymous containers, use the alias name for IP-to-name mapping, i.e. the DNS PTR record ([#21019](https://github.com/docker/docker/pull/21019))
-- Fix a panic when deleting an entry from the /etc/hosts file ([#21019](https://github.com/docker/docker/pull/21019))
-- Source the forwarded DNS queries from the container net namespace ([#21019](https://github.com/docker/docker/pull/21019))
-- Fix to retain the network internal mode config for bridge networks on daemon reload ([#21780](https://github.com/docker/docker/pull/21780))
-- Fix to retain IPAM driver option configs on daemon reload ([#21914](https://github.com/docker/docker/pull/21914))
-
-### Plugins
-
-- Fix a file descriptor leak that would occur every time plugins were enumerated ([#20686](https://github.com/docker/docker/pull/20686))
-- Fix an issue where the Authz plugin would corrupt the payload body when faced with a large amount of data ([#20602](https://github.com/docker/docker/pull/20602))
-
-### Runtime
-
-- Fix a panic that could occur when cleaning up after a container started with invalid parameters ([#21716](https://github.com/docker/docker/pull/21716))
-- Fix a race with event timers stopping early ([#21692](https://github.com/docker/docker/pull/21692))
-- Fix race conditions in the layer store, potentially corrupting the map and crashing the process ([#21677](https://github.com/docker/docker/pull/21677))
-- Un-deprecate auto-creation of host directories for mounts ([#21666](https://github.com/docker/docker/pull/21666)).
-  This feature was marked deprecated in Docker 1.9, but that turned out to be too much of a backward-incompatible change, so the feature was kept.
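The `EnableIPv6` and internal-network details surfaced by `docker network inspect` in the Networking entries above are also visible through the Engine's REST API. The sketch below is illustrative only and is not part of the upstream changelog; it assumes a local daemon listening on the default `/var/run/docker.sock` socket and decodes just the two fields discussed here.

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	// HTTP client that dials the local Docker Engine API over its Unix socket.
	// The "docker" host in the request URL is a placeholder; the dialer ignores it.
	c := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	resp, err := c.Get("http://docker/networks")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode only the fields mentioned in the changelog entries above.
	var networks []struct {
		Name       string
		Driver     string
		EnableIPv6 bool
		Internal   bool
	}
	if err := json.NewDecoder(resp.Body).Decode(&networks); err != nil {
		panic(err)
	}
	for _, n := range networks {
		fmt.Printf("%-20s driver=%-8s ipv6=%v internal=%v\n", n.Name, n.Driver, n.EnableIPv6, n.Internal)
	}
}
```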
-+ It is now possible for containers to share the NET and IPC namespaces when `userns` is enabled ([#21383](https://github.com/docker/docker/pull/21383))
-+ `docker inspect` will now expose the rootfs layers ([#21370](https://github.com/docker/docker/pull/21370))
-+ Docker Windows gained a minimal `top` implementation ([#21354](https://github.com/docker/docker/pull/21354))
-* Docker learned to report the faulty executable when a container cannot be started due to its condition ([#21345](https://github.com/docker/docker/pull/21345))
-* Docker with device mapper will now refuse to run if `udev sync` is not available ([#21097](https://github.com/docker/docker/pull/21097))
-- Fix a bug where Docker would not validate the config file upon configuration reload ([#21089](https://github.com/docker/docker/pull/21089))
-- Fix a hang that would happen on attach if the initial start failed ([#21048](https://github.com/docker/docker/pull/21048))
-- Fix an issue where registry service options in the daemon configuration file were not properly taken into account ([#21045](https://github.com/docker/docker/pull/21045))
-- Fix a race between the exec and resize operations ([#21022](https://github.com/docker/docker/pull/21022))
-- Fix an issue where nanoseconds were not correctly taken into account when filtering Docker events ([#21013](https://github.com/docker/docker/pull/21013))
-- Fix the handling of Docker commands when passed a 64-byte id ([#21002](https://github.com/docker/docker/pull/21002))
-* Docker will now return a `204` (i.e. http.StatusNoContent) code when it successfully deletes a network ([#20977](https://github.com/docker/docker/pull/20977))
-- Fix a bug where the daemon would wait indefinitely in case the process it was about to kill had already exited on its own ([#20967](https://github.com/docker/docker/pull/20967))
-* The devmapper driver learned the `dm.min_free_space` option. If the mapped device free space reaches the passed value, new device creation will be prohibited. ([#20786](https://github.com/docker/docker/pull/20786))
-+ Docker can now prevent processes in containers from gaining new privileges via the `--security-opt=no-new-privileges` flag ([#20727](https://github.com/docker/docker/pull/20727))
-- Starting a container with the `--device` option will now correctly resolve symlinks ([#20684](https://github.com/docker/docker/pull/20684))
-+ Docker now relies on [`containerd`](https://github.com/docker/containerd) and [`runc`](https://github.com/opencontainers/runc) to spawn containers.
([#20662](https://github.com/docker/docker/pull/20662))
-- Fix docker configuration reloading to only alter values present in the given config file ([#20604](https://github.com/docker/docker/pull/20604))
-+ Docker now allows setting a container hostname via the `--hostname` flag when `--net=host` ([#20177](https://github.com/docker/docker/pull/20177))
-+ Docker now allows executing privileged containers while running with `--userns-remap` if both `--privileged` and the new `--userns=host` flag are specified ([#20111](https://github.com/docker/docker/pull/20111))
-- Fix Docker not correctly cleaning up old containers upon restarting after a crash ([#19679](https://github.com/docker/docker/pull/19679))
-* Docker will now error out if it doesn't recognize a configuration key within the config file ([#19517](https://github.com/docker/docker/pull/19517))
-- Fix loading containers, on daemon startup, when they depend on a plugin running within a container ([#19500](https://github.com/docker/docker/pull/19500))
-* `docker update` learned how to change a container's restart policy ([#19116](https://github.com/docker/docker/pull/19116))
-* `docker inspect` now also returns a new `State` field containing the container state in a human-readable way (i.e. one of `created`, `restarting`, `running`, `paused`, `exited` or `dead`) ([#18966](https://github.com/docker/docker/pull/18966))
-+ Docker learned to limit the number of active pids (i.e. processes) within the container via the `pids-limit` flag. NOTE: This requires `CGROUP_PIDS=y` to be in the kernel configuration. ([#18697](https://github.com/docker/docker/pull/18697))
-- `docker load` now has a `--quiet` option to suppress the load output ([#20078](https://github.com/docker/docker/pull/20078))
-- Fix a bug in neighbor discovery for IPv6 peers ([#20842](https://github.com/docker/docker/pull/20842))
-- Fix a panic during cleanup if a container was started with invalid options ([#21802](https://github.com/docker/docker/pull/21802))
-- Fix a situation where a container cannot be stopped if the terminal is closed ([#21840](https://github.com/docker/docker/pull/21840))
-
-### Security
-
-* Objects with the `pcp_pmcd_t` selinux type were given management access to `/var/lib/docker(/.*)?` ([#21370](https://github.com/docker/docker/pull/21370))
-* `restart_syscall`, `copy_file_range`, `mlock2` joined the list of allowed calls in the default seccomp profile ([#21117](https://github.com/docker/docker/pull/21117), [#21262](https://github.com/docker/docker/pull/21262))
-* `send`, `recv` and `x32` were added to the list of allowed syscalls and arches in the default seccomp profile ([#19432](https://github.com/docker/docker/pull/19432))
-* Docker Content Trust now requests the server to perform snapshot signing ([#21046](https://github.com/docker/docker/pull/21046))
-* Support for using YubiKeys for Content Trust signing has been moved out of experimental ([#21591](https://github.com/docker/docker/pull/21591))
-
-### Volumes
-
-* Output of `docker volume ls` is now sorted by volume name ([#20389](https://github.com/docker/docker/pull/20389))
-* Local volumes can now accept options similar to the unix `mount` tool ([#20262](https://github.com/docker/docker/pull/20262))
-- Fix an issue where a one-letter directory name could not be used as the source for volumes ([#21106](https://github.com/docker/docker/pull/21106))
-+ `docker run -v` now accepts a new flag `nocopy`.
This tells the runtime not to copy the container path content into the volume (which is the default behavior) ([#21223](https://github.com/docker/docker/pull/21223))
-
-## 1.10.3 (2016-03-10)
-
-### Runtime
-
-- Fix Docker client exiting with an "Unrecognized input header" error [#20706](https://github.com/docker/docker/pull/20706)
-- Fix Docker exiting if Exec is started with both `AttachStdin` and `Detach` [#20647](https://github.com/docker/docker/pull/20647)
-
-### Distribution
-
-- Fix a crash when pushing multiple images sharing the same layers to the same repository in parallel [#20831](https://github.com/docker/docker/pull/20831)
-- Fix a panic when pushing images to a registry which uses a misconfigured token service [#21030](https://github.com/docker/docker/pull/21030)
-
-### Plugin system
-
-- Fix issue preventing volume plugins from starting when SELinux is enabled [#20834](https://github.com/docker/docker/pull/20834)
-- Prevent Docker from exiting if a volume plugin returns a null response for Get requests [#20682](https://github.com/docker/docker/pull/20682)
-- Fix plugin system leaking file descriptors if a plugin has an error [#20680](https://github.com/docker/docker/pull/20680)
-
-### Security
-
-- Fix linux32 emulation failing during docker build [#20672](https://github.com/docker/docker/pull/20672)
-  It was due to the `personality` syscall being blocked by the default seccomp profile.
-- Fix Oracle XE 10g failing to start in a container [#20981](https://github.com/docker/docker/pull/20981)
-  It was due to the `ipc` syscall being blocked by the default seccomp profile.
-- Fix user namespaces not working on Linux From Scratch [#20685](https://github.com/docker/docker/pull/20685)
-- Fix issue preventing the daemon from starting if userns is enabled and the `subuid` or `subgid` files contain comments [#20725](https://github.com/docker/docker/pull/20725)
-
-## 1.10.2 (2016-02-22)
-
-### Runtime
-
-- Prevent systemd from deleting containers' cgroups when its configuration is reloaded [#20518](https://github.com/docker/docker/pull/20518)
-- Fix SELinux issues by disregarding `--read-only` when mounting `/dev/mqueue` [#20333](https://github.com/docker/docker/pull/20333)
-- Fix chown permissions used during `docker cp` when userns is used [#20446](https://github.com/docker/docker/pull/20446)
-- Fix configuration loading issue with all booleans defaulting to `true` [#20471](https://github.com/docker/docker/pull/20471)
-- Fix occasional panic with `docker logs -f` [#20522](https://github.com/docker/docker/pull/20522)
-
-### Distribution
-
-- Keep layer reference if deletion failed to avoid a badly inconsistent state [#20513](https://github.com/docker/docker/pull/20513)
-- Gracefully handle a corner case when canceling migration [#20372](https://github.com/docker/docker/pull/20372)
-- Fix docker import on compressed data [#20367](https://github.com/docker/docker/pull/20367)
-- Fix tar-split file corruption during migration that later caused docker push and docker save to fail [#20458](https://github.com/docker/docker/pull/20458)
-
-### Networking
-
-- Fix daemon crash if embedded DNS is sent garbage [#20510](https://github.com/docker/docker/pull/20510)
-
-### Volumes
-
-- Fix issue with multiple volume references with the same name [#20381](https://github.com/docker/docker/pull/20381)
-
-### Security
-
-- Fix potential cache corruption and delegation conflict issues [#20523](https://github.com/docker/docker/pull/20523)
-
-## 1.10.1 (2016-02-11)
-
-### Runtime
-
-* Do not stop daemon on migration hard
failure [#20156](https://github.com/docker/docker/pull/20156) -- Fix various issues with migration to content-addressable images [#20058](https://github.com/docker/docker/pull/20058) -- Fix ZFS permission bug with user namespaces [#20045](https://github.com/docker/docker/pull/20045) -- Do not leak /dev/mqueue from the host to all containers, keep it container-specific [#19876](https://github.com/docker/docker/pull/19876) [#20133](https://github.com/docker/docker/pull/20133) -- Fix `docker ps --filter before=...` to not show stopped containers without providing `-a` flag [#20135](https://github.com/docker/docker/pull/20135) - -### Security - -- Fix issue preventing docker events to work properly with authorization plugin [#20002](https://github.com/docker/docker/pull/20002) - -### Distribution - -* Add additional verifications and prevent from uploading invalid data to registries [#20164](https://github.com/docker/docker/pull/20164) -- Fix regression preventing uppercase characters in image reference hostname [#20175](https://github.com/docker/docker/pull/20175) - -### Networking - -- Fix embedded DNS for user-defined networks in the presence of firewalld [#20060](https://github.com/docker/docker/pull/20060) -- Fix issue where removing a network during shutdown left Docker inoperable [#20181](https://github.com/docker/docker/issues/20181) [#20235](https://github.com/docker/docker/issues/20235) -- Embedded DNS is now able to return compressed results [#20181](https://github.com/docker/docker/issues/20181) -- Fix port-mapping issue with `userland-proxy=false` [#20181](https://github.com/docker/docker/issues/20181) - -### Logging - -- Fix bug where tcp+tls protocol would be rejected [#20109](https://github.com/docker/docker/pull/20109) - -### Volumes - -- Fix issue whereby older volume drivers would not receive volume options [#19983](https://github.com/docker/docker/pull/19983) - -### Misc - -- Remove TasksMax from Docker systemd service [#20167](https://github.com/docker/docker/pull/20167) - -## 1.10.0 (2016-02-04) - -**IMPORTANT**: Docker 1.10 uses a new content-addressable storage for images and layers. -A migration is performed the first time docker is run, and can take a significant amount of time depending on the number of images present. -Refer to this page on the wiki for more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration -We also released a cool migration utility that enables you to perform the migration before updating to reduce downtime. -Engine 1.10 migrator can be found on Docker Hub: https://hub.docker.com/r/docker/v1.10-migrator/ - -### Runtime - -+ New `docker update` command that allows updating resource constraints on running containers [#15078](https://github.com/docker/docker/pull/15078) -+ Add `--tmpfs` flag to `docker run` to create a tmpfs mount in a container [#13587](https://github.com/docker/docker/pull/13587) -+ Add `--format` flag to `docker images` command [#17692](https://github.com/docker/docker/pull/17692) -+ Allow to set daemon configuration in a file and hot-reload it with the `SIGHUP` signal [#18587](https://github.com/docker/docker/pull/18587) -+ Updated docker events to include more meta-data and event types [#18888](https://github.com/docker/docker/pull/18888) - This change is backward compatible in the API, but not on the CLI. 
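The last Runtime item above, the richer `docker events` payload (typed events with an actor and attributes), can be consumed straight from the remote API. The following is a minimal, illustrative sketch rather than anything shipped with Docker; it assumes a local daemon listening on the default `/var/run/docker.sock` socket and decodes only the `Type`/`Action`/`Actor` fields introduced by this change.

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

// event mirrors the typed format emitted by `docker events` after this change.
type event struct {
	Type   string `json:"Type"`
	Action string `json:"Action"`
	Actor  struct {
		ID         string            `json:"ID"`
		Attributes map[string]string `json:"Attributes"`
	} `json:"Actor"`
}

func main() {
	// HTTP client that dials the local Docker Engine API over its Unix socket.
	c := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// The endpoint streams one JSON object per event until the connection is closed.
	resp, err := c.Get("http://docker/events")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	dec := json.NewDecoder(resp.Body)
	for {
		var e event
		if err := dec.Decode(&e); err != nil {
			return // stream closed or undecodable payload
		}
		fmt.Printf("%-10s %-12s %s %v\n", e.Type, e.Action, e.Actor.ID, e.Actor.Attributes)
	}
}
```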
-+ Add `--blkio-weight-device` flag to `docker run` [#13959](https://github.com/docker/docker/pull/13959)
-+ Add `--device-read-bps` and `--device-write-bps` flags to `docker run` [#14466](https://github.com/docker/docker/pull/14466)
-+ Add `--device-read-iops` and `--device-write-iops` flags to `docker run` [#15879](https://github.com/docker/docker/pull/15879)
-+ Add `--oom-score-adj` flag to `docker run` [#16277](https://github.com/docker/docker/pull/16277)
-+ Add `--detach-keys` flag to `attach`, `run`, `start` and `exec` commands to override the default key sequence that detaches from a container [#15666](https://github.com/docker/docker/pull/15666)
-+ Add `--shm-size` flag to `run`, `create` and `build` to set the size of `/dev/shm` [#16168](https://github.com/docker/docker/pull/16168)
-+ Show the number of running, stopped, and paused containers in `docker info` [#19249](https://github.com/docker/docker/pull/19249)
-+ Show the `OSType` and `Architecture` in `docker info` [#17478](https://github.com/docker/docker/pull/17478)
-+ Add `--cgroup-parent` flag on `daemon` to set cgroup parent for all containers [#19062](https://github.com/docker/docker/pull/19062)
-+ Add `-L` flag to docker cp to follow symlinks [#16613](https://github.com/docker/docker/pull/16613)
-+ New `status=dead` filter for `docker ps` [#17908](https://github.com/docker/docker/pull/17908)
-* Change `docker run` exit codes to distinguish between runtime and application errors [#14012](https://github.com/docker/docker/pull/14012)
-* Enhance `docker events --since` and `--until` to support nanoseconds and timezones [#17495](https://github.com/docker/docker/pull/17495)
-* Add `--all`/`-a` flag to `stats` to include both running and stopped containers [#16742](https://github.com/docker/docker/pull/16742)
-* Change the default cgroup-driver to `cgroupfs` [#17704](https://github.com/docker/docker/pull/17704)
-* Emit a "tag" event when tagging an image with `build -t` [#17115](https://github.com/docker/docker/pull/17115)
-* Best effort for linked containers' start order when starting the daemon [#18208](https://github.com/docker/docker/pull/18208)
-* Add ability to add multiple tags on `build` [#15780](https://github.com/docker/docker/pull/15780)
-* Permit `OPTIONS` requests against any URL, thus fixing an issue with CORS [#19569](https://github.com/docker/docker/pull/19569)
-- Fix the `--quiet` flag on `docker build` to actually be quiet [#17428](https://github.com/docker/docker/pull/17428)
-- Fix `docker images --filter dangling=false` to now show all non-dangling images [#19326](https://github.com/docker/docker/pull/19326)
-- Fix race condition causing auto-restart to turn off on restart [#17629](https://github.com/docker/docker/pull/17629)
-- Recognize GPFS filesystems [#19216](https://github.com/docker/docker/pull/19216)
-- Fix an obscure bug preventing containers from starting [#19751](https://github.com/docker/docker/pull/19751)
-- Forbid `exec` during container restart [#19722](https://github.com/docker/docker/pull/19722)
-- devicemapper: Increasing `--storage-opt dm.basesize` will now increase the base device size on daemon restart [#19123](https://github.com/docker/docker/pull/19123)
-
-### Security
-
-+ Add `--userns-remap` flag to `daemon` to support user namespaces (previously in experimental) [#19187](https://github.com/docker/docker/pull/19187)
-+ Add support for custom seccomp profiles in `--security-opt` [#17989](https://github.com/docker/docker/pull/17989)
-+ Add default seccomp profile
[#18780](https://github.com/docker/docker/pull/18780) -+ Add `--authorization-plugin` flag to `daemon` to customize ACLs [#15365](https://github.com/docker/docker/pull/15365) -+ Docker Content Trust now supports the ability to read and write user delegations [#18887](https://github.com/docker/docker/pull/18887) - This is an optional, opt-in feature that requires the explicit use of the Notary command-line utility in order to be enabled. - Enabling delegation support in a specific repository will break the ability of Docker 1.9 and 1.8 to pull from that repository, if content trust is enabled. -* Allow SELinux to run in a container when using the BTRFS storage driver [#16452](https://github.com/docker/docker/pull/16452) - -### Distribution - -* Use content-addressable storage for images and layers [#17924](https://github.com/docker/docker/pull/17924) - Note that a migration is performed the first time docker is run; it can take a significant amount of time depending on the number of images and containers present. - Images no longer depend on the parent chain but contain a list of layer references. - `docker load`/`docker save` tarballs now also contain content-addressable image configurations. - For more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration -* Add support for the new [manifest format ("schema2")](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md) [#18785](https://github.com/docker/docker/pull/18785) -* Lots of improvements for push and pull: performance++, retries on failed downloads, cancelling on client disconnect [#18353](https://github.com/docker/docker/pull/18353), [#18418](https://github.com/docker/docker/pull/18418), [#19109](https://github.com/docker/docker/pull/19109), [#18353](https://github.com/docker/docker/pull/18353) -* Limit v1 protocol fallbacks [#18590](https://github.com/docker/docker/pull/18590) -- Fix issue where docker could hang indefinitely waiting for a nonexistent process to pull an image [#19743](https://github.com/docker/docker/pull/19743) - -### Networking - -+ Use DNS-based discovery instead of `/etc/hosts` [#19198](https://github.com/docker/docker/pull/19198) -+ Support for network-scoped alias using `--net-alias` on `run` and `--alias` on `network connect` [#19242](https://github.com/docker/docker/pull/19242) -+ Add `--ip` and `--ip6` on `run` and `network connect` to support custom IP addresses for a container in a network [#19001](https://github.com/docker/docker/pull/19001) -+ Add `--ipam-opt` to `network create` for passing custom IPAM options [#17316](https://github.com/docker/docker/pull/17316) -+ Add `--internal` flag to `network create` to restrict external access to and from the network [#19276](https://github.com/docker/docker/pull/19276) -+ Add `kv.path` option to `--cluster-store-opt` [#19167](https://github.com/docker/docker/pull/19167) -+ Add `discovery.heartbeat` and `discovery.ttl` options to `--cluster-store-opt` to configure discovery TTL and heartbeat timer [#18204](https://github.com/docker/docker/pull/18204) -+ Add `--format` flag to `network inspect` [#17481](https://github.com/docker/docker/pull/17481) -+ Add `--link` to `network connect` to provide a container-local alias [#19229](https://github.com/docker/docker/pull/19229) -+ Support for Capability exchange with remote IPAM plugins [#18775](https://github.com/docker/docker/pull/18775) -+ Add `--force` to `network disconnect` to force container to be disconnected from network 
[#19317](https://github.com/docker/docker/pull/19317)
-* Support for multi-host networking using the built-in overlay driver for all engine-supported kernels: 3.10+ [#18775](https://github.com/docker/docker/pull/18775)
-* `--link` is now supported on `docker run` for containers in user-defined networks [#19229](https://github.com/docker/docker/pull/19229)
-* Enhance `docker network rm` to allow removing multiple networks [#17489](https://github.com/docker/docker/pull/17489)
-* Include container names in `network inspect` [#17615](https://github.com/docker/docker/pull/17615)
-* Include auto-generated subnets for user-defined networks in `network inspect` [#17316](https://github.com/docker/docker/pull/17316)
-* Add `--filter` flag to `network ls` to hide predefined networks [#17782](https://github.com/docker/docker/pull/17782)
-* Add support for network connect/disconnect to stopped containers [#18906](https://github.com/docker/docker/pull/18906)
-* Add network ID to container inspect [#19323](https://github.com/docker/docker/pull/19323)
-- Fix MTU issue where Docker would not start with two or more default routes [#18108](https://github.com/docker/docker/pull/18108)
-- Fix duplicate IP address for containers [#18106](https://github.com/docker/docker/pull/18106)
-- Fix an issue that sometimes prevented docker from creating the bridge network [#19338](https://github.com/docker/docker/pull/19338)
-- Do not substitute 127.0.0.1 name server when using `--net=host` [#19573](https://github.com/docker/docker/pull/19573)
-
-### Logging
-
-+ New logging driver for Splunk [#16488](https://github.com/docker/docker/pull/16488)
-+ Add support for syslog over TCP+TLS [#18998](https://github.com/docker/docker/pull/18998)
-* Enhance `docker logs --since` and `--until` to support nanoseconds and time [#17495](https://github.com/docker/docker/pull/17495)
-* Enhance AWS logs to auto-detect region [#16640](https://github.com/docker/docker/pull/16640)
-
-### Volumes
-
-+ Add support to set the mount propagation mode for a volume [#17034](https://github.com/docker/docker/pull/17034)
-* Add `ls` and `inspect` endpoints to volume plugin API [#16534](https://github.com/docker/docker/pull/16534)
-  Existing plugins need to make use of these new APIs to satisfy users' expectations
-  For that, please use the new MIME type `application/vnd.docker.plugins.v1.2+json` [#19549](https://github.com/docker/docker/pull/19549)
-- Fix data not being copied to named volumes [#19175](https://github.com/docker/docker/pull/19175)
-- Fix issues preventing volume drivers from being containerized [#19500](https://github.com/docker/docker/pull/19500)
-- Fix `docker volume ls --dangling=false` to now show all non-dangling volumes [#19671](https://github.com/docker/docker/pull/19671)
-- Do not remove named volumes on container removal [#19568](https://github.com/docker/docker/pull/19568)
-- Allow external volume drivers to host anonymous volumes [#19190](https://github.com/docker/docker/pull/19190)
-
-### Builder
-
-+ Add support for `**` in `.dockerignore` to wildcard multiple levels of directories [#17090](https://github.com/docker/docker/pull/17090)
-- Fix handling of UTF-8 characters in Dockerfiles [#17055](https://github.com/docker/docker/pull/17055)
-- Fix permissions problem when reading from STDIN [#19283](https://github.com/docker/docker/pull/19283)
-
-### Client
-
-+ Add support for overriding the API version to use via a `DOCKER_API_VERSION` environment variable [#15964](https://github.com/docker/docker/pull/15964)
-- Fix a bug preventing
Windows clients from logging in to Docker Hub [#19891](https://github.com/docker/docker/pull/19891)
-
-### Misc
-
-* systemd: Set TasksMax in addition to LimitNPROC in the systemd service file [#19391](https://github.com/docker/docker/pull/19391)
-
-### Deprecations
-
-* Remove LXC support. The LXC driver was deprecated in Docker 1.8, and has now been removed [#17700](https://github.com/docker/docker/pull/17700)
-* Remove `--exec-driver` daemon flag, because it is no longer in use [#17700](https://github.com/docker/docker/pull/17700)
-* Remove old deprecated single-dashed long CLI flags (such as `-rm`; use `--rm` instead) [#17724](https://github.com/docker/docker/pull/17724)
-* Deprecate HostConfig at API container start [#17799](https://github.com/docker/docker/pull/17799)
-* Deprecate docker packages for newly EOL'd Linux distributions: Fedora 21 and Ubuntu 15.04 (Vivid) [#18794](https://github.com/docker/docker/pull/18794), [#18809](https://github.com/docker/docker/pull/18809)
-* Deprecate `-f` flag for docker tag [#18350](https://github.com/docker/docker/pull/18350)
-
-## 1.9.1 (2015-11-21)
-
-### Runtime
-
-- Do not prevent daemon from booting if images could not be restored (#17695)
-- Force IPC mount to unmount on daemon shutdown/init (#17539)
-- Turn IPC unmount errors into warnings (#17554)
-- Fix `docker stats` performance regression (#17638)
-- Clarify cryptic error message upon `docker logs` if `--log-driver=none` (#17767)
-- Fix rare panics (#17639, #17634, #17703)
-- Fix opq whiteout problems for files with a dot prefix (#17819)
-- devicemapper: try defaulting to xfs instead of ext4 for performance reasons (#17903, #17918)
-- devicemapper: fix displayed fs in docker info (#17974)
-- selinux: only relabel if the user requested it with the `z` option (#17450, #17834)
-- Do not make network calls when normalizing names (#18014)
-
-### Client
-
-- Fix `docker login` on windows (#17738)
-- Fix bug with `docker inspect` output when not connected to daemon (#17715)
-- Fix `docker inspect -f {{.HostConfig.Dns}} somecontainer` (#17680)
-
-### Builder
-
-- Fix regression with symlink behavior in ADD/COPY (#17710)
-
-### Networking
-
-- Allow passing a network ID as an argument for `--net` (#17558)
-- Fix connect to host and prevent disconnect from host for `host` network (#17476)
-- Fix `--fixed-cidr` issue when gateway ip falls in ip-range and ip-range is
-  not the first block in the network (#17853)
-- Restore deterministic `IPv6` generation from `MAC` address on default `bridge` network (#17890)
-- Allow port-mapping only for endpoints created on docker run (#17858)
-- Fixed an endpoint delete issue with a possible stale sbox (#18102)
-
-### Distribution
-
-- Correct parent chain in v2 push when v1Compatibility files on the disk are inconsistent (#18047)
-
-## 1.9.0 (2015-11-03)
-
-### Runtime
-
-+ `docker stats` now returns block IO metrics (#15005)
-+ `docker stats` now details network stats per interface (#15786)
-+ Add `ancestor=` filter to `docker ps --filter` flag to filter
-  containers based on their ancestor images (#14570)
-+ Add `label=` filter to `docker ps --filter` to filter containers
-  based on label (#16530)
-+ Add `--kernel-memory` flag to `docker run` (#14006)
-+ Add `--message` flag to `docker import` allowing an optional message to be specified (#15711)
-+ Add `--privileged` flag to `docker exec` (#14113)
-+ Add `--stop-signal` flag to `docker run` allowing the container process stop signal to be replaced (#15307)
-+ Add a new `unless-stopped` restart policy (#15348)
-+ Inspecting
an image now returns tags (#13185) -+ Add container size information to `docker inspect` (#15796) -+ Add `RepoTags` and `RepoDigests` field to `/images/{name:.*}/json` (#17275) -- Remove the deprecated `/container/ps` endpoint from the API (#15972) -- Send and document correct HTTP codes for `/exec//start` (#16250) -- Share shm and mqueue between containers sharing IPC namespace (#15862) -- Event stream now shows OOM status when `--oom-kill-disable` is set (#16235) -- Ensure special network files (/etc/hosts etc.) are read-only if bind-mounted -with `ro` option (#14965) -- Improve `rmi` performance (#16890) -- Do not update /etc/hosts for the default bridge network, except for links (#17325) -- Fix conflict with duplicate container names (#17389) -- Fix an issue with incorrect template execution in `docker inspect` (#17284) -- DEPRECATE `-c` short flag variant for `--cpu-shares` in docker run (#16271) - -### Client - -+ Allow `docker import` to import from local files (#11907) - -### Builder - -+ Add a `STOPSIGNAL` Dockerfile instruction allowing to set a different -stop-signal for the container process (#15307) -+ Add an `ARG` Dockerfile instruction and a `--build-arg` flag to `docker build` -that allows to add build-time environment variables (#15182) -- Improve cache miss performance (#16890) - -### Storage - -- devicemapper: Implement deferred deletion capability (#16381) - -### Networking - -+ `docker network` exits experimental and is part of standard release (#16645) -+ New network top-level concept, with associated subcommands and API (#16645) - WARNING: the API is different from the experimental API -+ Support for multiple isolated/micro-segmented networks (#16645) -+ Built-in multihost networking using VXLAN based overlay driver (#14071) -+ Support for third-party network plugins (#13424) -+ Ability to dynamically connect containers to multiple networks (#16645) -+ Support for user-defined IP address management via pluggable IPAM drivers (#16910) -+ Add daemon flags `--cluster-store` and `--cluster-advertise` for built-in nodes discovery (#16229) -+ Add `--cluster-store-opt` for setting up TLS settings (#16644) -+ Add `--dns-opt` to the daemon (#16031) -- DEPRECATE following container `NetworkSettings` fields in API v1.21: `EndpointID`, `Gateway`, - `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`, `IPPrefixLen`, `IPv6Gateway` and `MacAddress`. - Those are now specific to the `bridge` network. Use `NetworkSettings.Networks` to inspect - the networking settings of a container per network. - -### Volumes - -+ New top-level `volume` subcommand and API (#14242) -- Move API volume driver settings to host-specific config (#15798) -- Print an error message if volume name is not unique (#16009) -- Ensure volumes created from Dockerfiles always use the local volume driver -(#15507) -- DEPRECATE auto-creating missing host paths for bind mounts (#16349) - -### Logging - -+ Add `awslogs` logging driver for Amazon CloudWatch (#15495) -+ Add generic `tag` log option to allow customizing container/image -information passed to driver (e.g. show container names) (#15384) -- Implement the `docker logs` endpoint for the journald driver (#13707) -- DEPRECATE driver-specific log tags (e.g. `syslog-tag`, etc.) 
(#15384) - -### Distribution - -+ `docker search` now works with partial names (#16509) -- Push optimization: avoid buffering to file (#15493) -- The daemon will display progress for images that were already being pulled -by another client (#15489) -- Only permissions required for the current action being performed are requested (#) -+ Renaming trust keys (and respective environment variables) from `offline` to -`root` and `tagging` to `repository` (#16894) -- DEPRECATE trust key environment variables -`DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and -`DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` (#16894) - -### Security - -+ Add SELinux profiles to the rpm package (#15832) -- Fix various issues with AppArmor profiles provided in the deb package -(#14609) -- Add AppArmor policy that prevents writing to /proc (#15571) - -## 1.8.3 (2015-10-12) - -### Distribution - -- Fix layer IDs lead to local graph poisoning (CVE-2014-8178) -- Fix manifest validation and parsing logic errors allow pull-by-digest validation bypass (CVE-2014-8179) -+ Add `--disable-legacy-registry` to prevent a daemon from using a v1 registry - -## 1.8.2 (2015-09-10) - -### Distribution - -- Fixes rare edge case of handling GNU LongLink and LongName entries. -- Fix ^C on docker pull. -- Fix docker pull issues on client disconnection. -- Fix issue that caused the daemon to panic when loggers weren't configured properly. -- Fix goroutine leak pulling images from registry V2. - -### Runtime - -- Fix a bug mounting cgroups for docker daemons running inside docker containers. -- Initialize log configuration properly. - -### Client: - -- Handle `-q` flag in `docker ps` properly when there is a default format. - -### Networking - -- Fix several corner cases with netlink. - -### Contrib - -- Fix several issues with bash completion. 
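Related to the 1.9.0 Distribution note above that `docker search` now works with partial names, the same lookup is exposed on the remote API as `GET /images/search`. The sketch below is illustrative and not part of the original changelog; it assumes a local daemon on the default Unix socket and the hypothetical search term `redis`, and the snake_case field names are the ones this particular endpoint returns.

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"net/url"
)

func main() {
	// HTTP client that dials the local Docker Engine API over its Unix socket.
	c := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// Roughly equivalent to `docker search redis`.
	term := url.QueryEscape("redis")
	resp, err := c.Get("http://docker/images/search?term=" + term)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var results []struct {
		Name        string `json:"name"`
		Description string `json:"description"`
		StarCount   int    `json:"star_count"`
		IsOfficial  bool   `json:"is_official"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
		panic(err)
	}
	for _, r := range results {
		fmt.Printf("%-30s %5d stars official=%v\n", r.Name, r.StarCount, r.IsOfficial)
	}
}
```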
-
-## 1.8.1 (2015-08-12)
-
-### Distribution
-
-* Fix a bug where pushing multiple tags would result in invalid images
-
-## 1.8.0 (2015-08-11)
-
-### Distribution
-
-+ Trusted pull, push and build, disabled by default
-* Make tar layers deterministic between registries
-* Don't allow deleting the image of running containers
-* Check if a tag name to load is a valid digest
-* Allow one-character repository names
-* Add a more accurate error description for an invalid tag name
-* Make build cache ignore mtime
-
-### Cli
-
-+ Add support for DOCKER_CONFIG/--config to specify config file dir
-+ Add --type flag for docker inspect command
-+ Add formatting options to `docker ps` with `--format`
-+ Replace `docker -d` with new subcommand `docker daemon`
-* Zsh completion updates and improvements
-* Add some missing events to bash completion
-* Support daemon urls with base paths in `docker -H`
-* Validate status= filter to docker ps
-* Display when a container is in --net=host in docker ps
-* Extend docker inspect to export image metadata related to graph driver
-* Restore --default-gateway{,-v6} daemon options
-* Add missing unpublished ports in docker ps
-* Allow duration strings in `docker events` as --since/--until
-* Expose more mounts information in `docker inspect`
-
-### Runtime
-
-+ Add new Fluentd logging driver
-+ Allow `docker import` to load from local files
-+ Add logging driver for GELF via UDP
-+ Allow copying files from host to containers with `docker cp`
-+ Promote volume drivers from experimental to master
-+ Add rollover options to json-file log driver, and --log-driver-opts flag
-+ Add memory swappiness tuning options
-* Remove cgroup read-only flag when privileged
-* Make /proc, /sys, & /dev readonly for readonly containers
-* Add cgroup bind mount by default
-* Overlay: Export metadata for container and image in `docker inspect`
-* Devicemapper: external device activation
-* Devicemapper: Compare uuid of base device on startup
-* Remove RC4 from the list of registry cipher suites
-* Add syslog-facility option
-* LXC execdriver compatibility with recent LXC versions
-* Mark LXC execdriver as deprecated (to be removed with the migration to runc)
-
-### Plugins
-
-* Separate plugin sockets and specs locations
-* Allow TLS connections to plugins
-
-### Bug fixes
-
-- Add missing 'Names' field to /containers/json API output
-- Make `docker rmi` of dangling images safe while pulling
-- Devicemapper: Change default basesize to 100G
-- Go Scheduler issue with sync.Mutex and gcc
-- Fix issue where Search API endpoint would panic due to empty AuthConfig
-- Set image canonical names correctly
-- Check dockerinit only if lxc driver is used
-- Fix ulimit usage of nproc
-- Always attach STDIN if -i,--interactive is specified
-- Show error messages when saving container state fails
-- Fixed the incorrect assumption that --bridge=none disables the network
-- Check for invalid port specifications in host configuration
-- Fix endpoint leave failure for --net=host mode
-- Fix goroutine leak in the stats API if the container is not running
-- Check for apparmor file before reading it
-- Fix DOCKER_TLS_VERIFY being ignored
-- Set umask to the default on startup
-- Correct the message when pausing or unpausing a non-running container
-- Adjust disallowed CpuShares in container creation
-- ZFS: correctly apply selinux context
-- Display an empty string instead of `<nil>` when the IP opt is nil
-- `docker kill` returns an error when the container is not running
-- Fix COPY/ADD quoted/json form
-- Fix goroutine leak on logs -f
with no output
-- Remove panic in nat package on invalid hostport
-- Fix container linking in Fedora 22
-- Fix error caused by using default gateways outside of the allocated range
-- Format times in inspect command with a template as RFC3339Nano
-- Make the registry client accept 2xx and 3xx HTTP status responses as successful
-- Fix race issue that caused the daemon to crash when certain layer downloads failed in a specific order.
-- Fix error when the docker ps format was not valid.
-- Remove redundant ip forward check.
-- Fix issue trying to push images to repository mirrors.
-- Fix error cleaning up network entrypoints when there is an initialization issue.
-
-## 1.7.1 (2015-07-14)
-
-#### Runtime
-
-- Fix default user spawning exec process with `docker exec`
-- Make `--bridge=none` not configure the network bridge
-- Publish networking stats properly
-- Fix implicit devicemapper selection with static binaries
-- Fix socket connections that hung intermittently
-- Fix bridge interface creation on CentOS/RHEL 6.6
-- Fix local dns lookups added to resolv.conf
-- Fix copy command mounting volumes
-- Fix read/write privileges in volumes mounted with --volumes-from
-
-#### Remote API
-
-- Fix unmarshaling of Command and Entrypoint
-- Set limit for minimum client version supported
-- Validate port specification
-- Return proper errors when attach/reattach fail
-
-#### Distribution
-
-- Fix pulling private images
-- Fix fallback between registry V2 and V1
-
-## 1.7.0 (2015-06-16)
-
-#### Runtime
-+ Experimental feature: support for out-of-process volume plugins
-* The userland proxy can be disabled in favor of hairpin NAT using the daemon’s `--userland-proxy=false` flag
-* The `exec` command supports the `-u|--user` flag to specify the new process owner
-+ Default gateway for containers can be specified daemon-wide using the `--default-gateway` and `--default-gateway-v6` flags
-+ The CPU CFS (Completely Fair Scheduler) quota can be set in `docker run` using `--cpu-quota`
-+ Container block IO can be controlled in `docker run` using `--blkio-weight`
-+ ZFS support
-+ The `docker logs` command supports a `--since` argument
-+ UTS namespace can be shared with the host with `docker run --uts=host`
-
-#### Quality
-* Networking stack was entirely rewritten as part of the libnetwork effort
-* Engine internals refactoring
-* Volumes code was entirely rewritten to support the plugins effort
-+ Sending SIGUSR1 to a daemon will dump all goroutine stacks without exiting
-
-#### Build
-+ Support ${variable:-value} and ${variable:+value} syntax for environment variables
-+ Support resource management flags `--cgroup-parent`, `--cpu-period`, `--cpu-quota`, `--cpuset-cpus`, `--cpuset-mems`
-+ git context changes with branches and directories
-* The .dockerignore file supports exclusion rules
-
-#### Distribution
-+ Client support for v2 mirroring for the official registry
-
-#### Bugfixes
-* Firewalld is now supported and will automatically be used when available
-* Mount --device recursively
-
-## 1.6.2 (2015-05-13)
-
-#### Runtime
-- Revert change prohibiting mounting into /sys
-
-## 1.6.1 (2015-05-07)
-
-#### Security
-- Fix read/write /proc paths (CVE-2015-3630)
-- Prohibit VOLUME /proc and VOLUME / (CVE-2015-3631)
-- Fix opening of file-descriptor 1 (CVE-2015-3627)
-- Fix symlink traversal on container respawn allowing local privilege escalation (CVE-2015-3629)
-- Prohibit mount of /sys
-
-#### Runtime
-- Update AppArmor policy to not allow mounts
-
-## 1.6.0 (2015-04-07)
-
-#### Builder
-+
Building images from an image ID -+ Build containers with resource constraints, ie `docker build --cpu-shares=100 --memory=1024m...` -+ `commit --change` to apply specified Dockerfile instructions while committing the image -+ `import --change` to apply specified Dockerfile instructions while importing the image -+ Builds no longer continue in the background when canceled with CTRL-C - -#### Client -+ Windows Support - -#### Runtime -+ Container and image Labels -+ `--cgroup-parent` for specifying a parent cgroup to place container cgroup within -+ Logging drivers, `json-file`, `syslog`, or `none` -+ Pulling images by ID -+ `--ulimit` to set the ulimit on a container -+ `--default-ulimit` option on the daemon which applies to all created containers (and overwritten by `--ulimit` on run) - -## 1.5.0 (2015-02-10) - -#### Builder -+ Dockerfile to use for a given `docker build` can be specified with the `-f` flag -* Dockerfile and .dockerignore files can be themselves excluded as part of the .dockerignore file, thus preventing modifications to these files invalidating ADD or COPY instructions cache -* ADD and COPY instructions accept relative paths -* Dockerfile `FROM scratch` instruction is now interpreted as a no-base specifier -* Improve performance when exposing a large number of ports - -#### Hack -+ Allow client-side only integration tests for Windows -* Include docker-py integration tests against Docker daemon as part of our test suites - -#### Packaging -+ Support for the new version of the registry HTTP API -* Speed up `docker push` for images with a majority of already existing layers -- Fixed contacting a private registry through a proxy - -#### Remote API -+ A new endpoint will stream live container resource metrics and can be accessed with the `docker stats` command -+ Containers can be renamed using the new `rename` endpoint and the associated `docker rename` command -* Container `inspect` endpoint show the ID of `exec` commands running in this container -* Container `inspect` endpoint show the number of times Docker auto-restarted the container -* New types of event can be streamed by the `events` endpoint: ‘OOM’ (container died with out of memory), ‘exec_create’, and ‘exec_start' -- Fixed returned string fields which hold numeric characters incorrectly omitting surrounding double quotes - -#### Runtime -+ Docker daemon has full IPv6 support -+ The `docker run` command can take the `--pid=host` flag to use the host PID namespace, which makes it possible for example to debug host processes using containerized debugging tools -+ The `docker run` command can take the `--read-only` flag to make the container’s root filesystem mounted as readonly, which can be used in combination with volumes to force a container’s processes to only write to locations that will be persisted -+ Container total memory usage can be limited for `docker run` using the `--memory-swap` flag -* Major stability improvements for devicemapper storage driver -* Better integration with host system: containers will reflect changes to the host's `/etc/resolv.conf` file when restarted -* Better integration with host system: per-container iptable rules are moved to the DOCKER chain -- Fixed container exiting on out of memory to return an invalid exit code - -#### Other -* The HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment variables are properly taken into account by the client when connecting to the Docker daemon - -## 1.4.1 (2014-12-15) - -#### Runtime -- Fix issue with volumes-from and bind mounts not being 
honored after create
-
-## 1.4.0 (2014-12-11)
-
-#### Notable Features since 1.3.0
-+ Set key=value labels to the daemon (displayed in `docker info`), applied with
-  new `-label` daemon flag
-+ Add support for `ENV` in Dockerfile of the form:
-  `ENV name=value name2=value2...`
-+ New Overlayfs Storage Driver
-+ `docker info` now returns an `ID` and `Name` field
-+ Filter events by event name, container, or image
-+ `docker cp` now supports copying from container volumes
-- Fixed `docker tag`, so it honors `--force` when overriding a tag for an existing image.
-
-## 1.3.3 (2014-12-11)
-
-#### Security
-- Fix path traversal vulnerability in processing of absolute symbolic links (CVE-2014-9356)
-- Fix decompression of xz image archives, preventing privilege escalation (CVE-2014-9357)
-- Validate image IDs (CVE-2014-9358)
-
-#### Runtime
-- Fix an issue when image archives are being read slowly
-
-#### Client
-- Fix a regression related to stdin redirection
-- Fix a regression with `docker cp` when the destination is the current directory
-
-## 1.3.2 (2014-11-20)
-
-#### Security
-- Fix tar breakout vulnerability
-* Extractions are now sandboxed in a chroot
-- Security options are no longer committed to images
-
-#### Runtime
-- Fix deadlock in `docker ps -f exited=1`
-- Fix a bug when `--volumes-from` references a container that failed to start
-
-#### Registry
-+ `--insecure-registry` now accepts CIDR notation such as 10.1.0.0/16
-* Private registries whose IPs fall in the 127.0.0.0/8 range do not need the `--insecure-registry` flag
-- Skip the experimental registry v2 API when mirroring is enabled
-
-## 1.3.1 (2014-10-28)
-
-#### Security
-* Prevent fallback to SSL protocols < TLS 1.0 for client, daemon and registry
-+ Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless `--insecure-registry` is specified
-
-#### Runtime
-- Fix issue where volumes would not be shared
-
-#### Client
-- Fix issue with `--iptables=false` not automatically setting `--ip-masq=false`
-- Fix docker run output to non-TTY stdout
-
-#### Builder
-- Fix escaping `$` for environment variables
-- Fix issue with lowercase `onbuild` Dockerfile instruction
-- Restrict environment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER`
-
-## 1.3.0 (2014-10-14)
-
-#### Notable features since 1.2.0
-+ Docker `exec` allows you to run additional processes inside existing containers
-+ Docker `create` gives you the ability to create a container via the CLI without executing a process
-+ `--security-opts` options to allow users to customize container labels and apparmor profiles
-+ Docker `ps` filters
-- Wildcard support to COPY/ADD
-+ Move production URLs to get.docker.com from get.docker.io
-+ Allocate IP address on the bridge inside a valid CIDR
-+ Use drone.io for PR and CI testing
-+ Ability to set up an official registry mirror
-+ Ability to save multiple images with docker `save`
-
-## 1.2.0 (2014-08-20)
-
-#### Runtime
-+ Make /etc/hosts, /etc/resolv.conf and /etc/hostname editable at runtime
-+ Auto-restart containers using policies
-+ Use /var/lib/docker/tmp for large temporary files
-+ `--cap-add` and `--cap-drop` to tweak which Linux capabilities you want
-+ `--device` to use devices in containers
-
-#### Client
-+ `docker search` on private registries
-+ Add `exited` filter to `docker ps --filter`
-* `docker rm -f` now kills instead of stopping
-+ Support for IPv6 addresses in `--dns` flag
-
-#### Proxy
-+ Proxy instances in separate processes
-* Small bug fix
on UDP proxy
-
-## 1.1.2 (2014-07-23)
-
-#### Runtime
-+ Fix port allocation for existing containers
-+ Fix containers restarting on daemon restart
-
-#### Packaging
-+ Fix /etc/init.d/docker issue on Debian
-
-## 1.1.1 (2014-07-09)
-
-#### Builder
-* Fix issue with ADD
-
-## 1.1.0 (2014-07-03)
-
-#### Notable features since 1.0.1
-+ Add `.dockerignore` support
-+ Pause containers during `docker commit`
-+ Add `--tail` to `docker logs`
-
-#### Builder
-+ Allow a tar file as context for `docker build`
-* Fix issue with whitespace and multi-line instructions in `Dockerfiles`
-
-#### Runtime
-* Overall performance improvements
-* Allow `/` as source of `docker run -v`
-* Fix port allocation
-* Fix bug in `docker save`
-* Add links information to `docker inspect`
-
-#### Client
-* Improve command line parsing for `docker commit`
-
-#### Remote API
-* Improve status code for the `start` and `stop` endpoints
-
-## 1.0.1 (2014-06-19)
-
-#### Notable features since 1.0.0
-* Enhance security for the LXC driver
-
-#### Builder
-* Fix `ONBUILD` instruction passed to grandchildren
-
-#### Runtime
-* Fix events subscription
-* Fix /etc/hostname file with host networking
-* Allow `-h` and `--net=none`
-* Fix issue with hotplug devices in `--privileged`
-
-#### Client
-* Fix artifacts with events
-* Fix a panic with empty flags
-* Fix `docker cp` on Mac OS X
-
-#### Miscellaneous
-* Fix compilation on Mac OS X
-* Fix several races
-
-## 1.0.0 (2014-06-09)
-
-#### Notable features since 0.12.0
-* Production support
-
-## 0.12.0 (2014-06-05)
-
-#### Notable features since 0.11.0
-* 40+ various improvements to stability, performance and usability
-* New `COPY` Dockerfile instruction to allow copying a local file from the context into the container without ever extracting if the file is a tar file
-* Inherit file permissions from the host on `ADD`
-* New `pause` and `unpause` commands to allow pausing and unpausing of containers using the cgroup freezer
-* The `images` command has a `-f`/`--filter` option to filter the list of images
-* Add `--force-rm` to clean up after a failed build
-* Standardize JSON keys in Remote API to CamelCase
-* A pull from `docker run` now assumes the `latest` tag if not specified
-* Enhance security on Linux capabilities and device nodes
-
-## 0.11.1 (2014-05-07)
-
-#### Registry
-- Fix push and pull to private registry
-
-## 0.11.0 (2014-05-07)
-
-#### Notable features since 0.10.0
-
-* SELinux support for mount and process labels
-* Linked containers can be accessed by hostname
-* Use the new `--net` flag to allow advanced network configuration such as host networking so that containers can use the host's network interfaces
-* Add a ping endpoint to the Remote API to do healthchecks of your docker daemon
-* Logs can now be returned with an optional timestamp
-* Docker now works with registries that support SHA-512
-* Multiple registry endpoints are supported to allow registry mirrors
-
-## 0.10.0 (2014-04-08)
-
-#### Builder
-- Fix printing multiple messages on a single line. Fixes broken output during builds.
-- Follow symlinks inside container's root for ADD build instructions.
-- Fix EXPOSE caching.
-
-#### Documentation
-- Add the new options of `docker ps` to the documentation.
-- Add the options of `docker restart` to the documentation.
-- Update daemon docs and help messages for --iptables and --ip-forward.
-- Updated apt-cacher-ng docs example.
-- Remove duplicate description of --mtu from docs.
-- Add missing -t and -v for `docker images` to the docs.
-- Add fixes to the cli docs.
-- Update libcontainer docs. -- Update images in docs to remove references to AUFS and LXC. -- Update the nodejs_web_app in the docs to use the new epel RPM address. -- Fix external link on security of containers. -- Update remote API docs. -- Add image size to history docs. -- Be explicit about binding to all interfaces in redis example. -- Document DisableNetwork flag in the 1.10 remote api. -- Document that `--lxc-conf` is lxc only. -- Add chef usage documentation. -- Add example for an image with multiple for `docker load`. -- Explain what `docker run -a` does in the docs. - -#### Contrib -- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. -- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. -- Remove inotifywait hack from the upstart host-integration example because it's not necessary any more. -- Add check-config script to contrib. -- Fix fish shell completion. - -#### Hack -* Clean up "go test" output from "make test" to be much more readable/scannable. -* Exclude more "definitely not unit tested Go source code" directories from hack/make/test. -+ Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. -- Include contributed completions in Ubuntu PPA. -+ Add cli integration tests. -* Add tweaks to the hack scripts to make them simpler. - -#### Remote API -+ Add TLS auth support for API. -* Move git clone from daemon to client. -- Fix content-type detection in docker cp. -* Split API into 2 go packages. - -#### Runtime -* Support hairpin NAT without going through Docker server. -- devicemapper: succeed immediately when removing non-existent devices. -- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time and unlock while sleeping). -- devicemapper: increase timeout in waitClose to 10 seconds. -- devicemapper: ensure we shut down thin pool cleanly. -- devicemapper: pass info, rather than hash to activateDeviceIfNeeded, deactivateDevice, setInitialized, deleteDevice. -- devicemapper: avoid AB-BA deadlock. -- devicemapper: make shutdown better/faster. -- improve alpha sorting in mflag. -- Remove manual http cookie management because the cookiejar is being used. -- Use BSD raw mode on Darwin. Fixes nano, tmux and others. -- Add FreeBSD support for the client. -- Merge auth package into registry. -- Add deprecation warning for -t on `docker pull`. -- Remove goroutine leak on error. -- Update parseLxcInfo to comply with new lxc1.0 format. -- Fix attach exit on darwin. -- Improve deprecation message. -- Retry to retrieve the layer metadata up to 5 times for `docker pull`. -- Only unshare the mount namespace for execin. -- Merge existing config when committing. -- Disable daemon startup timeout. -- Fix issue #4681: add loopback interface when networking is disabled. -- Add failing test case for issue #4681. -- Send SIGTERM to child, instead of SIGKILL. -- Show the driver and the kernel version in `docker info` even when not in debug mode. -- Always symlink /dev/ptmx for libcontainer. This fixes console related problems. -- Fix issue caused by the absence of /etc/apparmor.d. -- Don't leave empty cidFile behind when failing to create the container. -- Mount cgroups automatically if they're not mounted already. -- Use mock for search tests. -- Update to double-dash everywhere. -- Move .dockerenv parsing to lxc driver. -- Move all bind mounts in the container inside the namespace. 
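The "Add TLS auth support for API" item in the Remote API notes above means the daemon can require client certificates on its TCP endpoint. The snippet below is an illustrative client-side sketch only: the file names (`ca.pem`, `cert.pem`, `key.pem`), the `docker-host` name, and the 2376 port are placeholders for whatever your daemon's TLS flags (`--tlscacert`, `--tlscert`, `--tlskey`) actually point at.

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Trust the CA that signed the daemon's server certificate.
	caPEM, err := os.ReadFile("ca.pem") // placeholder path
	if err != nil {
		panic(err)
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(caPEM)

	// Present a client certificate, since the daemon authenticates callers by certificate.
	cert, err := tls.LoadX509KeyPair("cert.pem", "key.pem") // placeholder paths
	if err != nil {
		panic(err)
	}

	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{RootCAs: pool, Certificates: []tls.Certificate{cert}},
	}}

	// `/_ping` is a cheap health-check endpoint; host and port are placeholders.
	resp, err := client.Get("https://docker-host:2376/_ping")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```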
-- Don't use separate bind mount for container.
-- Always symlink /dev/ptmx for libcontainer.
-- Don't kill by pid for other drivers.
-- Add initial logging to libcontainer.
-* Sort by port in `docker ps`.
-- Move networking drivers into runtime top level package.
-+ Add --no-prune to `docker rmi`.
-+ Add time since exit in `docker ps`.
-- graphdriver: add build tags.
-- Prevent allocation of previously allocated ports & improve port allocation.
-* Add support for --since/--before in `docker ps`.
-- Clean up container stop.
-+ Add support for configurable dns search domains.
-- Add support for relative WORKDIR instructions.
-- Add --output flag for docker save.
-- Remove duplication of DNS entries in config merging.
-- Add cpuset.cpus to cgroups and native driver options.
-- Remove docker-ci.
-- Promote btrfs. btrfs is no longer considered experimental.
-- Add --input flag to `docker load`.
-- Return error when existing bridge doesn't match IP address.
-- Strip comments before parsing line continuations to avoid interpreting instructions as comments.
-- Fix TestOnlyLoopbackExistsWhenUsingDisableNetworkOption to ignore "DOWN" interfaces.
-- Add systemd implementation of cgroups and make containers show up as systemd units.
-- Fix commit and import when no repository is specified.
-- Remount /var/lib/docker as --private to fix scaling issue.
-- Use the environment's proxy when pinging the remote registry.
-- Reduce the error level of harmless errors.
-* Allow --volumes-from to be individual files.
-- Fix expanding buffer in StdCopy.
-- Set error regardless of attach or stdin. This fixes #3364.
-- Add support for --env-file to load environment variables from files.
-- Symlink /etc/mtab and /proc/mounts.
-- Allow pushing a single tag.
-- Shut down containers cleanly at shutdown and wait forever for the containers to shut down. This makes container shutdown on daemon shutdown work properly via SIGTERM.
-- Don't throw an error when starting an already running container.
-- Fix dynamic port allocation limit.
-- Remove setupDev from libcontainer.
-- Add API version to `docker version`.
-- Return correct exit code when receiving signal and make SIGQUIT quit without cleanup.
-- Fix --volumes-from mount failure.
-- Allow non-privileged containers to create device nodes.
-- Skip login tests because of external dependency on a hosted service.
-- Deprecate `docker images --tree` and `docker images --viz`.
-- Deprecate `docker insert`.
-- Include base abstraction for apparmor. This fixes some apparmor-related problems on Ubuntu 14.04.
-- Add specific error message when hitting 401 over HTTP on push.
-- Fix absolute volume check.
-- Remove volumes-from from the config.
-- Move DNS options to hostconfig.
-- Update the apparmor profile for libcontainer.
-- Add deprecation notice for `docker commit -run`.
-
-## 0.9.1 (2014-03-24)
-
-#### Builder
-- Fix printing multiple messages on a single line. Fixes broken output during builds.
-
-#### Documentation
-- Fix external link on security of containers.
-
-#### Contrib
-- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly.
-- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile.
-
-#### Hack
-- Generate md5 and sha256 hashes when building, and upload them via hack/release.sh.
-
-#### Remote API
-- Fix content-type detection in `docker cp`.
-
-#### Runtime
-- Use BSD raw mode on Darwin. Fixes nano, tmux and others.
-- Only unshare the mount namespace for execin. -- Retry to retrieve the layer metadata up to 5 times for `docker pull`. -- Merge existing config when committing. -- Fix panic in monitor. -- Disable daemon startup timeout. -- Fix issue #4681: add loopback interface when networking is disabled. -- Add failing test case for issue #4681. -- Send SIGTERM to child, instead of SIGKILL. -- Show the driver and the kernel version in `docker info` even when not in debug mode. -- Always symlink /dev/ptmx for libcontainer. This fixes console related problems. -- Fix issue caused by the absence of /etc/apparmor.d. -- Don't leave empty cidFile behind when failing to create the container. -- Improve deprecation message. -- Fix attach exit on darwin. -- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time, unlock while sleeping). -- devicemapper: succeed immediately when removing non-existent devices. -- devicemapper: increase timeout in waitClose to 10 seconds. -- Remove goroutine leak on error. -- Update parseLxcInfo to comply with new lxc1.0 format. - -## 0.9.0 (2014-03-10) - -#### Builder -- Avoid extra mount/unmount during build. This fixes mount/unmount related errors during build. -- Add error to docker build --rm. This adds missing error handling. -- Forbid chained onbuild, `onbuild from` and `onbuild maintainer` triggers. -- Make `--rm` the default for `docker build`. - -#### Documentation -- Download the docker client binary for Mac over https. -- Update the titles of the install instructions & descriptions. -* Add instructions for upgrading boot2docker. -* Add port forwarding example in OS X install docs. -- Attempt to disentangle repository and registry. -- Update docs to explain more about `docker ps`. -- Update sshd example to use a Dockerfile. -- Rework some examples, including the Python examples. -- Update docs to include instructions for a container's lifecycle. -- Update docs documentation to discuss the docs branch. -- Don't skip cert check for an example & use HTTPS. -- Bring back the memory and swap accounting section which was lost when the kernel page was removed. -- Explain DNS warnings and how to fix them on systems running and using a local nameserver. - -#### Contrib -- Add Tanglu support for mkimage-debootstrap. -- Add SteamOS support for mkimage-debootstrap. - -#### Hack -- Get package coverage when running integration tests. -- Remove the Vagrantfile. This is being replaced with boot2docker. -- Fix tests on systems where aufs isn't available. -- Update packaging instructions and remove the dependency on lxc. - -#### Remote API -* Move code specific to the API to the api package. -- Fix header content type for the API. Makes all endpoints use proper content type. -- Fix registry auth & remove ping calls from CmdPush and CmdPull. -- Add newlines to the JSON stream functions. - -#### Runtime -* Do not ping the registry from the CLI. All requests to registries flow through the daemon. -- Check for nil information return in the lxc driver. This fixes panics with older lxc versions. -- Devicemapper: cleanups and fix for unmount. Fixes two problems which were causing unmount to fail intermittently. -- Devicemapper: remove directory when removing device. Directories don't get left behind when removing the device. -* Devicemapper: enable skip_block_zeroing. Improves performance by not zeroing blocks. -- Devicemapper: fix shutdown warnings. Fixes shutdown warnings concerning pool device removal. -- Ensure docker cp stream is closed properly. 
Fixes problems with files not being copied by `docker cp`. -- Stop making `tcp://` default to `127.0.0.1:4243` and remove the default port for tcp. -- Fix `--run` in `docker commit`. This makes `docker commit --run` work again. -- Fix custom bridge related options. This makes custom bridges work again. -+ Mount-bind the PTY as container console. This allows tmux/screen to run. -+ Add the pure Go libcontainer library to make it possible to run containers using only features of the Linux kernel. -+ Add native exec driver which uses libcontainer and make it the default exec driver. -- Add support for handling extended attributes in archives. -* Set the container MTU to be the same as the host MTU. -+ Add simple sha256 checksums for layers to speed up `docker push`. -* Improve kernel version parsing. -* Allow flag grouping (`docker run -it`). -- Remove chroot exec driver. -- Fix divide by zero to fix panic. -- Rewrite `docker rmi`. -- Fix docker info with lxc 1.0.0. -- Fix fedora tty with apparmor. -* Don't always append env vars, replace defaults with vars from config. -* Fix a goroutine leak. -* Switch to Go 1.2.1. -- Fix unique constraint error checks. -* Handle symlinks for Docker's data directory and for TMPDIR. -- Add deprecation warnings for flags (-flag is deprecated in favor of --flag) -- Add apparmor profile for the native execution driver. -* Move system specific code from archive to pkg/system. -- Fix duplicate signal for `docker run -i -t` (issue #3336). -- Return correct process pid for lxc. -- Add a -G option to specify the group which unix sockets belong to. -+ Add `-f` flag to `docker rm` to force removal of running containers. -+ Kill ghost containers and restart all ghost containers when the docker daemon restarts. -+ Add `DOCKER_RAMDISK` environment variable to make Docker work when the root is on a ramdisk. - -## 0.8.1 (2014-02-18) - -#### Builder - -- Avoid extra mount/unmount during build. This removes an unneeded mount/unmount operation which was causing problems with devicemapper -- Fix regression with ADD of tar files. This stops Docker from decompressing tarballs added via ADD from the local file system -- Add error to `docker build --rm`. This adds a missing error check to ensure failures to remove containers are detected and reported - -#### Documentation - -* Update issue filing instructions -* Warn against the use of symlinks for Docker's storage folder -* Replace the Firefox example with an IceWeasel example -* Rewrite the PostgreSQL example using a Dockerfile and add more details to it -* Improve the OS X documentation - -#### Remote API - -- Fix broken images API for version less than 1.7 -- Use the right encoding for all API endpoints which return JSON -- Move remote api client to api/ -- Queue calls to the API using generic socket wait - -#### Runtime - -- Fix the use of custom settings for bridges and custom bridges -- Refactor the devicemapper code to avoid many mount/unmount race conditions and failures -- Remove two panics which could make Docker crash in some situations -- Don't ping registry from the CLI client -- Enable skip_block_zeroing for devicemapper. This stops devicemapper from always zeroing entire blocks -- Fix --run in `docker commit`. This makes docker commit store `--run` in the image configuration -- Remove directory when removing devicemapper device. This cleans up leftover mount directories -- Drop NET_ADMIN capability for non-privileged containers. 
Unprivileged containers can't change their network configuration -- Ensure `docker cp` stream is closed properly -- Avoid extra mount/unmount during container registration. This removes an unneeded mount/unmount operation which was causing problems with devicemapper -- Stop allowing tcp:// as a default tcp bin address which binds to 127.0.0.1:4243 and remove the default port -+ Mount-bind the PTY as container console. This allows tmux and screen to run in a container -- Clean up archive closing. This fixes and improves archive handling -- Fix engine tests on systems where temp directories are symlinked -- Add test methods for save and load -- Avoid temporarily unmounting the container when restarting it. This fixes a race for devicemapper during restart -- Support submodules when building from a GitHub repository -- Quote volume path to allow spaces -- Fix remote tar ADD behavior. This fixes a regression which was causing Docker to extract tarballs - -## 0.8.0 (2014-02-04) - -#### Notable features since 0.7.0 - -* Images and containers can be removed much faster -* Building an image from source with docker build is now much faster -* The Docker daemon starts and stops much faster -* The memory footprint of many common operations has been reduced, by streaming files instead of buffering them in memory, fixing memory leaks, and fixing various suboptimal memory allocations -* Several race conditions were fixed, making Docker more stable under very high concurrency load. This makes Docker more stable and less likely to crash and reduces the memory footprint of many common operations -* All packaging operations are now built on the Go language’s standard tar implementation, which is bundled with Docker itself. This makes packaging more portable across host distributions, and solves several issues caused by quirks and incompatibilities between different distributions of tar -* Docker can now create, remove and modify larger numbers of containers and images graciously thanks to more aggressive releasing of system resources. For example the storage driver API now allows Docker to do reference counting on mounts created by the drivers -With the ongoing changes to the networking and execution subsystems of docker testing these areas have been a focus of the refactoring. By moving these subsystems into separate packages we can test, analyze, and monitor coverage and quality of these packages -* Many components have been separated into smaller sub-packages, each with a dedicated test suite. As a result the code is better-tested, more readable and easier to change - -* The ADD instruction now supports caching, which avoids unnecessarily re-uploading the same source content again and again when it hasn’t changed -* The new ONBUILD instruction adds to your image a “trigger” instruction to be executed at a later time, when the image is used as the base for another build -* Docker now ships with an experimental storage driver which uses the BTRFS filesystem for copy-on-write -* Docker is officially supported on Mac OS X -* The Docker daemon supports systemd socket activation - -## 0.7.6 (2014-01-14) - -#### Builder - -* Do not follow symlink outside of build context - -#### Runtime - -- Remount bind mounts when ro is specified -* Use https for fetching docker version - -#### Other - -* Inline the test.docker.io fingerprint -* Add ca-certificates to packaging documentation - -## 0.7.5 (2014-01-09) - -#### Builder - -* Disable compression for build. 
More space usage but a much faster upload -- Fix ADD caching for certain paths -- Do not compress archive from git build - -#### Documentation - -- Fix error in GROUP add example -* Make sure the GPG fingerprint is inline in the documentation -* Give more specific advice on setting up signing of commits for DCO - -#### Runtime - -- Fix misspelled container names -- Do not add hostname when networking is disabled -* Return most recent image from the cache by date -- Return all errors from docker wait -* Add Content-Type Header "application/json" to GET /version and /info responses - -#### Other - -* Update DCO to version 1.1 -+ Update Makefile to use "docker:GIT_BRANCH" as the generated image name -* Update Travis to check for new 1.1 DCO version - -## 0.7.4 (2014-01-07) - -#### Builder - -- Fix ADD caching issue with . prefixed path -- Fix docker build on devicemapper by reverting sparse file tar option -- Fix issue with file caching and prevent wrong cache hit -* Use same error handling while unmarshaling CMD and ENTRYPOINT - -#### Documentation - -* Simplify and streamline Amazon Quickstart -* Install instructions use unprefixed Fedora image -* Update instructions for mtu flag for Docker on GCE -+ Add Ubuntu Saucy to installation -- Fix for wrong version warning on master instead of latest - -#### Runtime - -- Only get the image's rootfs when we need to calculate the image size -- Correctly handle unmapping UDP ports -* Make CopyFileWithTar use a pipe instead of a buffer to save memory on docker build -- Fix login message to say pull instead of push -- Fix "docker load" help by removing "SOURCE" prompt and mentioning STDIN -* Make blank -H option default to the same as no -H was sent -* Extract cgroups utilities to own submodule - -#### Other - -+ Add Travis CI configuration to validate DCO and gofmt requirements -+ Add Developer Certificate of Origin Text -* Upgrade VBox Guest Additions -* Check standalone header when pinging a registry server - -## 0.7.3 (2014-01-02) - -#### Builder - -+ Update ADD to use the image cache, based on a hash of the added content -* Add error message for empty Dockerfile - -#### Documentation - -- Fix outdated link to the "Introduction" on www.docker.io -+ Update the docs to get wider when the screen does -- Add information about needing to install LXC when using raw binaries -* Update Fedora documentation to disentangle the docker and docker.io conflict -* Add a note about using the new `-mtu` flag in several GCE zones -+ Add FrugalWare installation instructions -+ Add a more complete example of `docker run` -- Fix API documentation for creating and starting Privileged containers -- Add missing "name" parameter documentation on "/containers/create" -* Add a mention of `lxc-checkconfig` as a way to check for some of the necessary kernel configuration -- Update the 1.8 API documentation with some additions that were added to the docs for 1.7 - -#### Hack - -- Add missing libdevmapper dependency to the packagers documentation -* Update minimum Go requirement to a hard line at Go 1.2+ -* Many minor improvements to the Vagrantfile -+ Add ability to customize dockerinit search locations when compiling (to be used very sparingly only by packagers of platforms who require a nonstandard location) -+ Add coverprofile generation reporting -- Add `-a` to our Go build flags, removing the need for recompiling the stdlib manually -* Update Dockerfile to be more canonical and have less spurious warnings during build -- Fix some miscellaneous `docker pull` progress bar 
display issues -* Migrate more miscellaneous packages under the "pkg" folder -* Update TextMate highlighting to automatically be enabled for files named "Dockerfile" -* Reorganize syntax highlighting files under a common "contrib/syntax" directory -* Update install.sh script (https://get.docker.io/) to not fail if busybox fails to download or run at the end of the Ubuntu/Debian installation -* Add support for container names in bash completion - -#### Packaging - -+ Add an official Docker client binary for Darwin (Mac OS X) -* Remove empty "Vendor" string and added "License" on deb package -+ Add a stubbed version of "/etc/default/docker" in the deb package - -#### Runtime - -* Update layer application to extract tars in place, avoiding file churn while handling whiteouts -- Fix permissiveness of mtime comparisons in tar handling (since GNU tar and Go tar do not yet support sub-second mtime precision) -* Reimplement `docker top` in pure Go to work more consistently, and even inside Docker-in-Docker (thus removing the shell injection vulnerability present in some versions of `lxc-ps`) -+ Update `-H unix://` to work similarly to `-H tcp://` by inserting the default values for missing portions -- Fix more edge cases regarding dockerinit and deleted or replaced docker or dockerinit files -* Update container name validation to include '.' -- Fix use of a symlink or non-absolute path as the argument to `-g` to work as expected -* Update to handle external mounts outside of LXC, fixing many small mounting quirks and making future execution backends and other features simpler -* Update to use proper box-drawing characters everywhere in `docker images -tree` -* Move MTU setting from LXC configuration to directly use netlink -* Add `-S` option to external tar invocation for more efficient spare file handling -+ Add arch/os info to User-Agent string, especially for registry requests -+ Add `-mtu` option to Docker daemon for configuring MTU -- Fix `docker build` to exit with a non-zero exit code on error -+ Add `DOCKER_HOST` environment variable to configure the client `-H` flag without specifying it manually for every invocation - -## 0.7.2 (2013-12-16) - -#### Runtime - -+ Validate container names on creation with standard regex -* Increase maximum image depth to 127 from 42 -* Continue to move api endpoints to the job api -+ Add -bip flag to allow specification of dynamic bridge IP via CIDR -- Allow bridge creation when ipv6 is not enabled on certain systems -* Set hostname and IP address from within dockerinit -* Drop capabilities from within dockerinit -- Fix volumes on host when symlink is present the image -- Prevent deletion of image if ANY container is depending on it even if the container is not running -* Update docker push to use new progress display -* Use os.Lstat to allow mounting unix sockets when inspecting volumes -- Adjust handling of inactive user login -- Add missing defines in devicemapper for older kernels -- Allow untag operations with no container validation -- Add auth config to docker build - -#### Documentation - -* Add more information about Docker logging -+ Add RHEL documentation -* Add a direct example for changing the CMD that is run in a container -* Update Arch installation documentation -+ Add section on Trusted Builds -+ Add Network documentation page - -#### Other - -+ Add new cover bundle for providing code coverage reporting -* Separate integration tests in bundles -* Make Tianon the hack maintainer -* Update mkimage-debootstrap with more tweaks for keeping 
images small -* Use https to get the install script -* Remove vendored dotcloud/tar now that Go 1.2 has been released - -## 0.7.1 (2013-12-05) - -#### Documentation - -+ Add @SvenDowideit as documentation maintainer -+ Add links example -+ Add documentation regarding ambassador pattern -+ Add Google Cloud Platform docs -+ Add dockerfile best practices -* Update doc for RHEL -* Update doc for registry -* Update Postgres examples -* Update doc for Ubuntu install -* Improve remote api doc - -#### Runtime - -+ Add hostconfig to docker inspect -+ Implement `docker log -f` to stream logs -+ Add env variable to disable kernel version warning -+ Add -format to `docker inspect` -+ Support bind mount for files -- Fix bridge creation on RHEL -- Fix image size calculation -- Make sure iptables are called even if the bridge already exists -- Fix issue with stderr only attach -- Remove init layer when destroying a container -- Fix same port binding on different interfaces -- `docker build` now returns the correct exit code -- Fix `docker port` to display correct port -- `docker build` now check that the dockerfile exists client side -- `docker attach` now returns the correct exit code -- Remove the name entry when the container does not exist - -#### Registry - -* Improve progress bars, add ETA for downloads -* Simultaneous pulls now waits for the first to finish instead of failing -- Tag only the top-layer image when pushing to registry -- Fix issue with offline image transfer -- Fix issue preventing using ':' in password for registry - -#### Other - -+ Add pprof handler for debug -+ Create a Makefile -* Use stdlib tar that now includes fix -* Improve make.sh test script -* Handle SIGQUIT on the daemon -* Disable verbose during tests -* Upgrade to go1.2 for official build -* Improve unit tests -* The test suite now runs all tests even if one fails -* Refactor C in Go (Devmapper) -- Fix OS X compilation - -## 0.7.0 (2013-11-25) - -#### Notable features since 0.6.0 - -* Storage drivers: choose from aufs, device-mapper, or vfs. -* Standard Linux support: docker now runs on unmodified Linux kernels and all major distributions. -* Links: compose complex software stacks by connecting containers to each other. -* Container naming: organize your containers by giving them memorable names. -* Advanced port redirects: specify port redirects per interface, or keep sensitive ports private. -* Offline transfer: push and pull images to the filesystem without losing information. -* Quality: numerous bugfixes and small usability improvements. Significant increase in test coverage. - -## 0.6.7 (2013-11-21) - -#### Runtime - -* Improve stability, fixes some race conditions -* Skip the volumes mounted when deleting the volumes of container. 
-* Fix layer size computation: handle hard links correctly -* Use the work Path for docker cp CONTAINER:PATH -* Fix tmp dir never cleanup -* Speedup docker ps -* More informative error message on name collisions -* Fix nameserver regex -* Always return long id's -* Fix container restart race condition -* Keep published ports on docker stop;docker start -* Fix container networking on Fedora -* Correctly express "any address" to iptables -* Fix network setup when reconnecting to ghost container -* Prevent deletion if image is used by a running container -* Lock around read operations in graph - -#### RemoteAPI - -* Return full ID on docker rmi - -#### Client - -+ Add -tree option to images -+ Offline image transfer -* Exit with status 2 on usage error and display usage on stderr -* Do not forward SIGCHLD to container -* Use string timestamp for docker events -since - -#### Other - -* Update to go 1.2rc5 -+ Add /etc/default/docker support to upstart - -## 0.6.6 (2013-11-06) - -#### Runtime - -* Ensure container name on register -* Fix regression in /etc/hosts -+ Add lock around write operations in graph -* Check if port is valid -* Fix restart runtime error with ghost container networking -+ Add some more colors and animals to increase the pool of generated names -* Fix issues in docker inspect -+ Escape apparmor confinement -+ Set environment variables using a file. -* Prevent docker insert to erase something -+ Prevent DNS server conflicts in CreateBridgeIface -+ Validate bind mounts on the server side -+ Use parent image config in docker build -* Fix regression in /etc/hosts - -#### Client - -+ Add -P flag to publish all exposed ports -+ Add -notrunc and -q flags to docker history -* Fix docker commit, tag and import usage -+ Add stars, trusted builds and library flags in docker search -* Fix docker logs with tty - -#### RemoteAPI - -* Make /events API send headers immediately -* Do not split last column docker top -+ Add size to history - -#### Other - -+ Contrib: Desktop integration. Firefox usecase. 
-+ Dockerfile: bump to go1.2rc3 - -## 0.6.5 (2013-10-29) - -#### Runtime - -+ Containers can now be named -+ Containers can now be linked together for service discovery -+ 'run -a', 'start -a' and 'attach' can forward signals to the container for better integration with process supervisors -+ Automatically start crashed containers after a reboot -+ Expose IP, port, and proto as separate environment vars for container links -* Allow ports to be published to specific ips -* Prohibit inter-container communication by default -- Ignore ErrClosedPipe for stdin in Container.Attach -- Remove unused field kernelVersion -* Fix issue when mounting subdirectories of /mnt in container -- Fix untag during removal of images -* Check return value of syscall.Chdir when changing working directory inside dockerinit - -#### Client - -- Only pass stdin to hijack when needed to avoid closed pipe errors -* Use less reflection in command-line method invocation -- Monitor the tty size after starting the container, not prior -- Remove useless os.Exit() calls after log.Fatal - -#### Hack - -+ Add initial init scripts library and a safer Ubuntu packaging script that works for Debian -* Add -p option to invoke debootstrap with http_proxy -- Update install.sh with $sh_c to get sudo/su for modprobe -* Update all the mkimage scripts to use --numeric-owner as a tar argument -* Update hack/release.sh process to automatically invoke hack/make.sh and bail on build and test issues - -#### Other - -* Documentation: Fix the flags for nc in example -* Testing: Remove warnings and prevent mount issues -- Testing: Change logic for tty resize to avoid warning in tests -- Builder: Fix race condition in docker build with verbose output -- Registry: Fix content-type for PushImageJSONIndex method -* Contrib: Improve helper tools to generate debian and Arch linux server images - -## 0.6.4 (2013-10-16) - -#### Runtime - -- Add cleanup of container when Start() fails -* Add better comments to utils/stdcopy.go -* Add utils.Errorf for error logging -+ Add -rm to docker run for removing a container on exit -- Remove error messages which are not actually errors -- Fix `docker rm` with volumes -- Fix some error cases where an HTTP body might not be closed -- Fix panic with wrong dockercfg file -- Fix the attach behavior with -i -* Record termination time in state. 
-- Use empty string so TempDir uses the OS's temp dir automatically -- Make sure to close the network allocators -+ Autorestart containers by default -* Bump vendor kr/pty to commit 3b1f6487b `(syscall.O_NOCTTY)` -* lxc: Allow set_file_cap capability in container -- Move run -rm to the cli only -* Split stdout stderr -* Always create a new session for the container - -#### Testing - -- Add aggregated docker-ci email report -- Add cleanup to remove leftover containers -* Add nightly release to docker-ci -* Add more tests around auth.ResolveAuthConfig -- Remove a few errors in tests -- Catch errClosing error when TCP and UDP proxies are terminated -* Only run certain tests with TESTFLAGS='-run TestName' make.sh -* Prevent docker-ci to test closing PRs -* Replace panic by log.Fatal in tests -- Increase TestRunDetach timeout - -#### Documentation - -* Add initial draft of the Docker infrastructure doc -* Add devenvironment link to CONTRIBUTING.md -* Add `apt-get install curl` to Ubuntu docs -* Add explanation for export restrictions -* Add .dockercfg doc -* Remove Gentoo install notes about #1422 workaround -* Fix help text for -v option -* Fix Ping endpoint documentation -- Fix parameter names in docs for ADD command -- Fix ironic typo in changelog -* Various command fixes in postgres example -* Document how to edit and release docs -- Minor updates to `postgresql_service.rst` -* Clarify LGTM process to contributors -- Corrected error in the package name -* Document what `vagrant up` is actually doing -+ improve doc search results -* Cleanup whitespace in API 1.5 docs -* use angle brackets in MAINTAINER example email -* Update archlinux.rst -+ Changes to a new style for the docs. Includes version switcher. -* Formatting, add information about multiline json -* Improve registry and index REST API documentation -- Replace deprecated upgrading reference to docker-latest.tgz, which hasn't been updated since 0.5.3 -* Update Gentoo installation documentation now that we're in the portage tree proper -* Cleanup and reorganize docs and tooling for contributors and maintainers -- Minor spelling correction of protocoll -> protocol - -#### Contrib - -* Add vim syntax highlighting for Dockerfiles from @honza -* Add mkimage-arch.sh -* Reorganize contributed completion scripts to add zsh completion - -#### Hack - -* Add vagrant user to the docker group -* Add proper bash completion for "docker push" -* Add xz utils as a runtime dep -* Add cleanup/refactor portion of #2010 for hack and Dockerfile updates -+ Add contrib/mkimage-centos.sh back (from #1621), and associated documentation link -* Add several of the small make.sh fixes from #1920, and make the output more consistent and contributor-friendly -+ Add @tianon to hack/MAINTAINERS -* Improve network performance for VirtualBox -* Revamp install.sh to be usable by more people, and to use official install methods whenever possible (apt repo, portage tree, etc.) -- Fix contrib/mkimage-debian.sh apt caching prevention -+ Add Dockerfile.tmLanguage to contrib -* Configured FPM to make /etc/init/docker.conf a config file -* Enable SSH Agent forwarding in Vagrant VM -* Several small tweaks/fixes for contrib/mkimage-debian.sh - -#### Other - -- Builder: Abort build if mergeConfig returns an error and fix duplicate error message -- Packaging: Remove deprecated packaging directory -- Registry: Use correct auth config when logging in. 
-- Registry: Fix the error message so it is the same as the regex - -## 0.6.3 (2013-09-23) - -#### Packaging - -* Add 'docker' group on install for ubuntu package -* Update tar vendor dependency -* Download apt key over HTTPS - -#### Runtime - -- Only copy and change permissions on non-bindmount volumes -* Allow multiple volumes-from -- Fix HTTP imports from STDIN - -#### Documentation - -* Update section on extracting the docker binary after build -* Update development environment docs for new build process -* Remove 'base' image from documentation - -#### Other - -- Client: Fix detach issue -- Registry: Update regular expression to match index - -## 0.6.2 (2013-09-17) - -#### Runtime - -+ Add domainname support -+ Implement image filtering with path.Match -* Remove unnecessary warnings -* Remove os/user dependency -* Only mount the hostname file when the config exists -* Handle signals within the `docker login` command -- UID and GID are now also applied to volumes -- `docker start` set error code upon error -- `docker run` set the same error code as the process started - -#### Builder - -+ Add -rm option in order to remove intermediate containers -* Allow multiline for the RUN instruction - -#### Registry - -* Implement login with private registry -- Fix push issues - -#### Other - -+ Hack: Vendor all dependencies -* Remote API: Bump to v1.5 -* Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc. -* Documentation: General improvements - -## 0.6.1 (2013-08-23) - -#### Registry - -* Pass "meta" headers in API calls to the registry - -#### Packaging - -- Use correct upstart script with new build tool -- Use libffi-dev, don`t build it from sources -- Remove duplicate mercurial install command - -## 0.6.0 (2013-08-22) - -#### Runtime - -+ Add lxc-conf flag to allow custom lxc options -+ Add an option to set the working directory -* Add Image name to LogEvent tests -+ Add -privileged flag and relevant tests, docs, and examples -* Add websocket support to /container//attach/ws -* Add warning when net.ipv4.ip_forwarding = 0 -* Add hostname to environment -* Add last stable version in `docker version` -- Fix race conditions in parallel pull -- Fix Graph ByParent() to generate list of child images per parent image. -- Fix typo: fmt.Sprint -> fmt.Sprintf -- Fix small \n error un docker build -* Fix to "Inject dockerinit at /.dockerinit" -* Fix #910. print user name to docker info output -* Use Go 1.1.2 for dockerbuilder -* Use ranged for loop on channels -- Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete -- Improve CMD, ENTRYPOINT, and attach docs. -- Improve connect message with socket error -- Load authConfig only when needed and fix useless WARNING -- Show tag used when image is missing -* Apply volumes-from before creating volumes -- Make docker run handle SIGINT/SIGTERM -- Prevent crash when .dockercfg not readable -- Install script should be fetched over https, not http. -* API, issue 1471: Use groups for socket permissions -- Correctly detect IPv4 forwarding -* Mount /dev/shm as a tmpfs -- Switch from http to https for get.docker.io -* Let userland proxy handle container-bound traffic -* Update the Docker CLI to specify a value for the "Host" header. 
-- Change network range to avoid conflict with EC2 DNS -- Reduce connect and read timeout when pinging the registry -* Parallel pull -- Handle ip route showing mask-less IP addresses -* Allow ENTRYPOINT without CMD -- Always consider localhost as a domain name when parsing the FQN repos name -* Refactor checksum - -#### Documentation - -* Add MongoDB image example -* Add instructions for creating and using the docker group -* Add sudo to examples and installation to documentation -* Add ufw doc -* Add a reference to ps -a -* Add information about Docker`s high level tools over LXC. -* Fix typo in docs for docker run -dns -* Fix a typo in the ubuntu installation guide -* Fix to docs regarding adding docker groups -* Update default -H docs -* Update readme with dependencies for building -* Update amazon.rst to explain that Vagrant is not necessary for running Docker on ec2 -* PostgreSQL service example in documentation -* Suggest installing linux-headers by default. -* Change the twitter handle -* Clarify Amazon EC2 installation -* 'Base' image is deprecated and should no longer be referenced in the docs. -* Move note about officially supported kernel -- Solved the logo being squished in Safari - -#### Builder - -+ Add USER instruction do Dockerfile -+ Add workdir support for the Buildfile -* Add no cache for docker build -- Fix docker build and docker events output -- Only count known instructions as build steps -- Make sure ENV instruction within build perform a commit each time -- Forbid certain paths within docker build ADD -- Repository name (and optionally a tag) in build usage -- Make sure ADD will create everything in 0755 - -#### Remote API - -* Sort Images by most recent creation date. -* Reworking opaque requests in registry module -* Add image name in /events -* Use mime pkg to parse Content-Type -* 650 http utils and user agent field - -#### Hack - -+ Bash Completion: Limit commands to containers of a relevant state -* Add docker dependencies coverage testing into docker-ci - -#### Packaging - -+ Docker-brew 0.5.2 support and memory footprint reduction -* Add new docker dependencies into docker-ci -- Revert "docker.upstart: avoid spawning a `sh` process" -+ Docker-brew and Docker standard library -+ Release docker with docker -* Fix the upstart script generated by get.docker.io -* Enabled the docs to generate manpages. -* Revert Bind daemon to 0.0.0.0 in Vagrant. 
- -#### Register - -* Improve auth push -* Registry unit tests + mock registry - -#### Tests - -* Improve TestKillDifferentUser to prevent timeout on buildbot -- Fix typo in TestBindMounts (runContainer called without image) -* Improve TestGetContainersTop so it does not rely on sleep -* Relax the lo interface test to allow iface index != 1 -* Add registry functional test to docker-ci -* Add some tests in server and utils - -#### Other - -* Contrib: bash completion script -* Client: Add docker cp command and copy api endpoint to copy container files/folders to the host -* Don`t read from stdout when only attached to stdin - -## 0.5.3 (2013-08-13) - -#### Runtime - -* Use docker group for socket permissions -- Spawn shell within upstart script -- Handle ip route showing mask-less IP addresses -- Add hostname to environment - -#### Builder - -- Make sure ENV instruction within build perform a commit each time - -## 0.5.2 (2013-08-08) - -* Builder: Forbid certain paths within docker build ADD -- Runtime: Change network range to avoid conflict with EC2 DNS -* API: Change daemon to listen on unix socket by default - -## 0.5.1 (2013-07-30) - -#### Runtime - -+ Add `ps` args to `docker top` -+ Add support for container ID files (pidfile like) -+ Add container=lxc in default env -+ Support networkless containers with `docker run -n` and `docker -d -b=none` -* Stdout/stderr logs are now stored in the same file as JSON -* Allocate a /16 IP range by default, with fallback to /24. Try 12 ranges instead of 3. -* Change .dockercfg format to json and support multiple auth remote -- Do not override volumes from config -- Fix issue with EXPOSE override - -#### API - -+ Docker client now sets useragent (RFC 2616) -+ Add /events endpoint - -#### Builder - -+ ADD command now understands URLs -+ CmdAdd and CmdEnv now respect Dockerfile-set ENV variables -- Create directories with 755 instead of 700 within ADD instruction - -#### Hack - -* Simplify unit tests with helpers -* Improve docker.upstart event -* Add coverage testing into docker-ci - -## 0.5.0 (2013-07-17) - -#### Runtime - -+ List all processes running inside a container with 'docker top' -+ Host directories can be mounted as volumes with 'docker run -v' -+ Containers can expose public UDP ports (eg, '-p 123/udp') -+ Optionally specify an exact public port (eg. '-p 80:4500') -* 'docker login' supports additional options -- Don't save a container`s hostname when committing an image. - -#### Registry - -+ New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries -- Fix issues when uploading images to a private registry - -#### Builder - -+ ENTRYPOINT instruction sets a default binary entry point to a container -+ VOLUME instruction marks a part of the container as persistent data -* 'docker build' displays the full output of a build by default - -## 0.4.8 (2013-07-01) - -+ Builder: New build operation ENTRYPOINT adds an executable entry point to the container. - Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID. 
-- Tests: Fix issues in the test suite - -## 0.4.7 (2013-06-28) - -#### Remote API - -* The progress bar updates faster when downloading and uploading large files -- Fix a bug in the optional unix socket transport - -#### Runtime - -* Improve detection of kernel version -+ Host directories can be mounted as volumes with 'docker run -b' -- fix an issue when only attaching to stdin -* Use 'tar --numeric-owner' to avoid uid mismatch across multiple hosts - -#### Hack - -* Improve test suite and dev environment -* Remove dependency on unit tests on 'os/user' - -#### Other - -* Registry: easier push/pull to a custom registry -+ Documentation: add terminology section - -## 0.4.6 (2013-06-22) - -- Runtime: fix a bug which caused creation of empty images (and volumes) to crash. - -## 0.4.5 (2013-06-21) - -+ Builder: 'docker build git://URL' fetches and builds a remote git repository -* Runtime: 'docker ps -s' optionally prints container size -* Tests: improved and simplified -- Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail. -- Builder: fix a regression when using ADD with single regular file. - -## 0.4.4 (2013-06-19) - -- Builder: fix a regression introduced in 0.4.3 which caused builds to fail on new clients. - -## 0.4.3 (2013-06-19) - -#### Builder - -+ ADD of a local file will detect tar archives and unpack them -* ADD improvements: use tar for copy + automatically unpack local archives -* ADD uses tar/untar for copies instead of calling 'cp -ar' -* Fix the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented. -- Fix a bug which caused builds to fail if ADD was the first command -* Nicer output for 'docker build' - -#### Runtime - -* Remove bsdtar dependency -* Add unix socket and multiple -H support -* Prevent rm of running containers -* Use go1.1 cookiejar -- Fix issue detaching from running TTY container -- Forbid parallel push/pull for a single image/repo. Fixes #311 -- Fix race condition within Run command when attaching. - -#### Client - -* HumanReadable ProgressBar sizes in pull -* Fix docker version`s git commit output - -#### API - -* Send all tags on History API call -* Add tag lookup to history command. 
Fixes #882 - -#### Documentation - -- Fix missing command in irc bouncer example - -## 0.4.2 (2013-06-17) - -- Packaging: Bumped version to work around an Ubuntu bug - -## 0.4.1 (2013-06-17) - -#### Remote Api - -+ Add flag to enable cross domain requests -+ Add images and containers sizes in docker ps and docker images - -#### Runtime - -+ Configure dns configuration host-wide with 'docker -d -dns' -+ Detect faulty DNS configuration and replace it with a public default -+ Allow docker run : -+ You can now specify public port (ex: -p 80:4500) -* Improve image removal to garbage-collect unreferenced parents - -#### Client - -* Allow multiple params in inspect -* Print the container id before the hijack in `docker run` - -#### Registry - -* Add regexp check on repo`s name -* Move auth to the client -- Remove login check on pull - -#### Other - -* Vagrantfile: Add the rest api port to vagrantfile`s port_forward -* Upgrade to Go 1.1 -- Builder: don`t ignore last line in Dockerfile when it doesn`t end with \n - -## 0.4.0 (2013-06-03) - -#### Builder - -+ Introducing Builder -+ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile - -#### Remote API - -+ Introducing Remote API -+ control Docker programmatically using a simple HTTP/json API - -#### Runtime - -* Various reliability and usability improvements - -## 0.3.4 (2013-05-30) - -#### Builder - -+ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile -+ 'docker build -t FOO' applies the tag FOO to the newly built container. - -#### Runtime - -+ Interactive TTYs correctly handle window resize -* Fix how configuration is merged between layers - -#### Remote API - -+ Split stdout and stderr on 'docker run' -+ Optionally listen on a different IP and port (use at your own risk) - -#### Documentation - -* Improve install instructions. - -## 0.3.3 (2013-05-23) - -- Registry: Fix push regression -- Various bugfixes - -## 0.3.2 (2013-05-09) - -#### Registry - -* Improve the checksum process -* Use the size to have a good progress bar while pushing -* Use the actual archive if it exists in order to speed up the push -- Fix error 400 on push - -#### Runtime - -* Store the actual archive on commit - -## 0.3.1 (2013-05-08) - -#### Builder - -+ Implement the autorun capability within docker builder -+ Add caching to docker builder -+ Add support for docker builder with native API as top level command -+ Implement ENV within docker builder -- Check the command existence prior create and add Unit tests for the case -* use any whitespaces instead of tabs - -#### Runtime - -+ Add go version to debug infos -* Kernel version - don`t show the dash if flavor is empty - -#### Registry - -+ Add docker search top level command in order to search a repository -- Fix pull for official images with specific tag -- Fix issue when login in with a different user and trying to push -* Improve checksum - async calculation - -#### Images - -+ Output graph of images to dot (graphviz) -- Fix ByParent function - -#### Documentation - -+ New introduction and high-level overview -+ Add the documentation for docker builder -- CSS fix for docker documentation to make REST API docs look better. -- Fix CouchDB example page header mistake -- Fix README formatting -* Update www.docker.io website. 
- -#### Other - -+ Website: new high-level overview -- Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc -* Packaging: packaging ubuntu; issue #510: Use goland-stable PPA package to build docker - -## 0.3.0 (2013-05-06) - -#### Runtime - -- Fix the command existence check -- strings.Split may return an empty string on no match -- Fix an index out of range crash if cgroup memory is not - -#### Documentation - -* Various improvements -+ New example: sharing data between 2 couchdb databases - -#### Other - -* Vagrant: Use only one deb line in /etc/apt -+ Registry: Implement the new registry - -## 0.2.2 (2013-05-03) - -+ Support for data volumes ('docker run -v=PATH') -+ Share data volumes between containers ('docker run -volumes-from') -+ Improve documentation -* Upgrade to Go 1.0.3 -* Various upgrades to the dev environment for contributors - -## 0.2.1 (2013-05-01) - -+ 'docker commit -run' bundles a layer with default runtime options: command, ports etc. -* Improve install process on Vagrant -+ New Dockerfile operation: "maintainer" -+ New Dockerfile operation: "expose" -+ New Dockerfile operation: "cmd" -+ Contrib script to build a Debian base layer -+ 'docker -d -r': restart crashed containers at daemon startup -* Runtime: improve test coverage - -## 0.2.0 (2013-04-23) - -- Runtime: ghost containers can be killed and waited for -* Documentation: update install instructions -- Packaging: fix Vagrantfile -- Development: automate releasing binaries and ubuntu packages -+ Add a changelog -- Various bugfixes - -## 0.1.8 (2013-04-22) - -- Dynamically detect cgroup capabilities -- Issue stability warning on kernels <3.8 -- 'docker push' buffers on disk instead of memory -- Fix 'docker diff' for removed files -- Fix 'docker stop' for ghost containers -- Fix handling of pidfile -- Various bugfixes and stability improvements - -## 0.1.7 (2013-04-18) - -- Container ports are available on localhost -- 'docker ps' shows allocated TCP ports -- Contributors can run 'make hack' to start a continuous integration VM -- Streamline ubuntu packaging & uploading -- Various bugfixes and stability improvements - -## 0.1.6 (2013-04-17) - -- Record the author an image with 'docker commit -author' - -## 0.1.5 (2013-04-17) - -- Disable standalone mode -- Use a custom DNS resolver with 'docker -d -dns' -- Detect ghost containers -- Improve diagnosis of missing system capabilities -- Allow disabling memory limits at compile time -- Add debian packaging -- Documentation: installing on Arch Linux -- Documentation: running Redis on docker -- Fix lxc 0.9 compatibility -- Automatically load aufs module -- Various bugfixes and stability improvements - -## 0.1.4 (2013-04-09) - -- Full support for TTY emulation -- Detach from a TTY session with the escape sequence `C-p C-q` -- Various bugfixes and stability improvements -- Minor UI improvements -- Automatically create our own bridge interface 'docker0' - -## 0.1.3 (2013-04-04) - -- Choose TCP frontend port with '-p :PORT' -- Layer format is versioned -- Major reliability improvements to the process manager -- Various bugfixes and stability improvements - -## 0.1.2 (2013-04-03) - -- Set container hostname with 'docker run -h' -- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]' -- Various bugfixes and stability improvements -- UI polish -- Progress bar on push/pull -- Use XZ compression by default -- Make IP allocator lazy - -## 0.1.1 (2013-03-31) - -- Display shorthand IDs for convenience -- Stabilize process management -- Layers 
can include a commit message -- Simplified 'docker attach' -- Fix support for re-attaching -- Various bugfixes and stability improvements -- Auto-download at run -- Auto-login on push -- Beefed up documentation - -## 0.1.0 (2013-03-23) - -Initial public release - -- Implement registry in order to push/pull images -- TCP port allocation -- Fix termcaps on Linux -- Add documentation -- Add Vagrant support with Vagrantfile -- Add unit tests -- Add repository/tags to ease image management -- Improve the layer implementation diff --git a/vendor/github.com/docker/docker/CONTRIBUTING.md b/vendor/github.com/docker/docker/CONTRIBUTING.md deleted file mode 100644 index c45ae337..00000000 --- a/vendor/github.com/docker/docker/CONTRIBUTING.md +++ /dev/null @@ -1,458 +0,0 @@ -# Contribute to the Moby Project - -Want to hack on the Moby Project? Awesome! We have a contributor's guide that explains -[setting up a development environment and the contribution -process](docs/contributing/). - -[![Contributors guide](docs/static_files/contributors.png)](https://docs.docker.com/opensource/project/who-written-for/) - -This page contains information about reporting issues as well as some tips and -guidelines useful to experienced open source contributors. Finally, make sure -you read our [community guidelines](#docker-community-guidelines) before you -start participating. - -## Topics - -* [Reporting Security Issues](#reporting-security-issues) -* [Design and Cleanup Proposals](#design-and-cleanup-proposals) -* [Reporting Issues](#reporting-other-issues) -* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines) -* [Community Guidelines](#docker-community-guidelines) - -## Reporting security issues - -The Moby maintainers take security seriously. If you discover a security -issue, please bring it to their attention right away! - -Please **DO NOT** file a public issue, instead send your report privately to -[security@docker.com](mailto:security@docker.com). - -Security reports are greatly appreciated and we will publicly thank you for it. -We also like to send gifts—if you're into schwag, make sure to let -us know. We currently do not offer a paid security bounty program, but are not -ruling it out in the future. - - -## Reporting other issues - -A great way to contribute to the project is to send a detailed report when you -encounter an issue. We always appreciate a well-written, thorough bug report, -and will thank you for it! - -Check that [our issue database](https://github.com/moby/moby/issues) -doesn't already include that problem or suggestion before submitting an issue. -If you find a match, you can use the "subscribe" button to get notified on -updates. Do *not* leave random "+1" or "I have this too" comments, as they -only clutter the discussion, and don't help resolving it. However, if you -have ways to reproduce the issue or have additional information that may help -resolving the issue, please leave a comment. - -When reporting issues, always include: - -* The output of `docker version`. -* The output of `docker info`. - -Also include the steps required to reproduce the problem if possible and -applicable. This information will help us review and fix your issue faster. -When sending lengthy log-files, consider posting them as a gist (https://gist.github.com). -Don't forget to remove sensitive data from your logfiles before posting (you can -replace those parts with "REDACTED"). 
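As an illustration of gathering those two outputs for a report (a minimal sketch only; the output file name is arbitrary and not part of the vendored file):

```
# Collect the environment details an issue report should include.
# Redact anything sensitive (credentials, internal hostnames, ...) before posting.
{
  echo '=== docker version ==='
  docker version
  echo
  echo '=== docker info ==='
  docker info
} > docker-issue-report.txt 2>&1
```

The resulting file can then be attached to the issue or posted as a gist.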
- -## Quick contribution tips and guidelines - -This section gives the experienced contributor some tips and guidelines. - -### Pull requests are always welcome - -Not sure if that typo is worth a pull request? Found a bug and know how to fix -it? Do it! We will appreciate it. Any significant improvement should be -documented as [a GitHub issue](https://github.com/moby/moby/issues) before -anybody starts working on it. - -We are always thrilled to receive pull requests. We do our best to process them -quickly. If your pull request is not accepted on the first try, -don't get discouraged! Our contributor's guide explains [the review process we -use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/). - -### Design and cleanup proposals - -You can propose new designs for existing Docker features. You can also design -entirely new features. We really appreciate contributors who want to refactor or -otherwise cleanup our project. For information on making these types of -contributions, see [the advanced contribution -section](https://docs.docker.com/opensource/workflow/advanced-contributing/) in -the contributors guide. - -### Connect with other Moby Project contributors - - - - - - - - - - - - - - - - -
-- Forums: A public forum for users to discuss questions and explore current design patterns and best practices about all the Moby projects. To participate, log in with your Github account or create an account at https://forums.mobyproject.org.
-- Slack: Register for the Docker Community Slack at https://community.docker.com/registrations/groups/4316. We use the #moby-project channel for general discussion, and there are separate channels for other Moby projects such as #containerd. Archives are available at https://dockercommunity.slackarchive.io/.
-- Twitter: You can follow the Moby Project Twitter feed to get updates on our products. You can also tweet us questions or just share blogs or stories.
- - -### Conventions - -Fork the repository and make changes on your fork in a feature branch: - -- If it's a bug fix branch, name it XXXX-something where XXXX is the number of - the issue. -- If it's a feature branch, create an enhancement issue to announce - your intentions, and name it XXXX-something where XXXX is the number of the - issue. - -Submit tests for your changes. See [TESTING.md](./TESTING.md) for details. - -If your changes need integration tests, write them against the API. The `cli` -integration tests are slowly either migrated to API tests or moved away as unit -tests in `docker/cli` and end-to-end tests for Docker. - -Update the documentation when creating or modifying features. Test your -documentation changes for clarity, concision, and correctness, as well as a -clean documentation build. See our contributors guide for [our style -guide](https://docs.docker.com/opensource/doc-style) and instructions on [building -the documentation](https://docs.docker.com/opensource/project/test-and-docs/#build-and-test-the-documentation). - -Write clean code. Universally formatted code promotes ease of writing, reading, -and maintenance. Always run `gofmt -s -w file.go` on each changed file before -committing your changes. Most editors have plug-ins that do this automatically. - -Pull request descriptions should be as clear as possible and include a reference -to all the issues that they address. - -### Successful Changes - -Before contributing large or high impact changes, make the effort to coordinate -with the maintainers of the project before submitting a pull request. This -prevents you from doing extra work that may or may not be merged. - -Large PRs that are just submitted without any prior communication are unlikely -to be successful. - -While pull requests are the methodology for submitting changes to code, changes -are much more likely to be accepted if they are accompanied by additional -engineering work. While we don't define this explicitly, most of these goals -are accomplished through communication of the design goals and subsequent -solutions. Often times, it helps to first state the problem before presenting -solutions. - -Typically, the best methods of accomplishing this are to submit an issue, -stating the problem. This issue can include a problem statement and a -checklist with requirements. If solutions are proposed, alternatives should be -listed and eliminated. Even if the criteria for elimination of a solution is -frivolous, say so. - -Larger changes typically work best with design documents. These are focused on -providing context to the design at the time the feature was conceived and can -inform future documentation contributions. - -### Commit Messages - -Commit messages must start with a capitalized and short summary (max. 50 chars) -written in the imperative, followed by an optional, more detailed explanatory -text which is separated from the summary by an empty line. - -Commit messages should follow best practices, including explaining the context -of the problem and how it was solved, including in caveats or follow up changes -required. They should tell the story of the change and provide readers -understanding of what led to it. - -If you're lost about what this even means, please see [How to Write a Git -Commit Message](http://chris.beams.io/posts/git-commit/) for a start. - -In practice, the best approach to maintaining a nice commit message is to -leverage a `git add -p` and `git commit --amend` to formulate a solid -changeset. 
This allows one to piece together a change, as information becomes -available. - -If you squash a series of commits, don't just submit that. Re-write the commit -message, as if the series of commits was a single stroke of brilliance. - -That said, there is no requirement to have a single commit for a PR, as long as -each commit tells the story. For example, if there is a feature that requires a -package, it might make sense to have the package in a separate commit then have -a subsequent commit that uses it. - -Remember, you're telling part of the story with the commit message. Don't make -your chapter weird. - -### Review - -Code review comments may be added to your pull request. Discuss, then make the -suggested modifications and push additional commits to your feature branch. Post -a comment after pushing. New commits show up in the pull request automatically, -but the reviewers are notified only when you comment. - -Pull requests must be cleanly rebased on top of master without multiple branches -mixed into the PR. - -**Git tip**: If your PR no longer merges cleanly, use `rebase master` in your -feature branch to update your pull request rather than `merge master`. - -Before you make a pull request, squash your commits into logical units of work -using `git rebase -i` and `git push -f`. A logical unit of work is a consistent -set of patches that should be reviewed together: for example, upgrading the -version of a vendored dependency and taking advantage of its now available new -feature constitute two separate units of work. Implementing a new function and -calling it in another file constitute a single logical unit of work. The very -high majority of submissions should have a single commit, so if in doubt: squash -down to one. - -After every commit, [make sure the test suite passes](./TESTING.md). Include -documentation changes in the same pull request so that a revert would remove -all traces of the feature or fix. - -Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that -close an issue. Including references automatically closes the issue on a merge. - -Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly -from the Git history. - -Please see the [Coding Style](#coding-style) for further guidelines. - -### Merge approval - -Moby maintainers use LGTM (Looks Good To Me) in comments on the code review to -indicate acceptance, or use the Github review approval feature. - -For an explanation of the review and approval process see the -[REVIEWING](project/REVIEWING.md) page. - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -1 Letterman Drive -Suite D4700 -San Francisco, CA, 94129 - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. 
- -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. - -### How can I become a maintainer? - -The procedures for adding new maintainers are explained in the -[/project/GOVERNANCE.md](/project/GOVERNANCE.md) -file in this repository. - -Don't forget: being a maintainer is a time investment. Make sure you -will have time to make yourself available. You don't have to be a -maintainer to make a difference on the project! - -### Manage issues and pull requests using the Derek bot - -If you want to help label, assign, close or reopen issues or pull requests -without commit rights, ask a maintainer to add your Github handle to the -`.DEREK.yml` file. [Derek](https://github.com/alexellis/derek) is a bot that extends -Github's user permissions to help non-committers to manage issues and pull requests simply by commenting. - -For example: - -* Labels - -``` -Derek add label: kind/question -Derek remove label: status/claimed -``` - -* Assign work - -``` -Derek assign: username -Derek unassign: me -``` - -* Manage issues and PRs - -``` -Derek close -Derek reopen -``` - -## Moby community guidelines - -We want to keep the Moby community awesome, growing and collaborative. We need -your help to keep it that way. To help with this we've come up with some general -guidelines for the community as a whole: - -* Be nice: Be courteous, respectful and polite to fellow community members: - no regional, racial, gender, or other abuse will be tolerated. We like - nice people way better than mean ones! - -* Encourage diversity and participation: Make everyone in our community feel - welcome, regardless of their background and the extent of their - contributions, and do everything possible to encourage participation in - our community. - -* Keep it legal: Basically, don't get us in trouble. Share only content that - you own, do not share private or sensitive information, and don't break - the law. - -* Stay on topic: Make sure that you are posting to the correct channel and - avoid off-topic discussions. Remember when you update an issue or respond - to an email you are potentially sending to a large number of people. Please - consider this before you update. 
Also remember that nobody likes spam. - -* Don't send email to the maintainers: There's no need to send email to the - maintainers to ask them to investigate an issue or to take a look at a - pull request. Instead of sending an email, GitHub mentions should be - used to ping maintainers to review a pull request, a proposal or an - issue. - -The open source governance for this repository is handled via the [Moby Technical Steering Committee (TSC)](https://github.com/moby/tsc) -charter. For any concerns with the community process regarding technical contributions, -please contact the TSC. More information on project governance is available in -our [project/GOVERNANCE.md](/project/GOVERNANCE.md) document. - -### Guideline violations — 3 strikes method - -The point of this section is not to find opportunities to punish people, but we -do need a fair way to deal with people who are making our community suck. - -1. First occurrence: We'll give you a friendly, but public reminder that the - behavior is inappropriate according to our guidelines. - -2. Second occurrence: We will send you a private message with a warning that - any additional violations will result in removal from the community. - -3. Third occurrence: Depending on the violation, we may need to delete or ban - your account. - -**Notes:** - -* Obvious spammers are banned on first occurrence. If we don't do this, we'll - have spam all over the place. - -* Violations are forgiven after 6 months of good behavior, and we won't hold a - grudge. - -* People who commit minor infractions will get some education, rather than - hammering them in the 3 strikes process. - -* The rules apply equally to everyone in the community, no matter how much - you've contributed. - -* Extreme violations of a threatening, abusive, destructive or illegal nature - will be addressed immediately and are not subject to 3 strikes or forgiveness. - -* Contact abuse@docker.com to report abuse or appeal violations. In the case of - appeals, we know that mistakes happen, and we'll work with you to come up with a - fair solution if there has been a misunderstanding. - -## Coding Style - -Unless explicitly stated, we follow all coding guidelines from the Go -community. While some of these standards may seem arbitrary, they somehow seem -to result in a solid, consistent codebase. - -It is possible that the code base does not currently comply with these -guidelines. We are not looking for a massive PR that fixes this, since that -goes against the spirit of the guidelines. All new contributions should make a -best effort to clean up and make the code base better than they left it. -Obviously, apply your best judgement. Remember, the goal here is to make the -code base easier for humans to navigate and understand. Always keep that in -mind when nudging others to comply. - -The rules: - -1. All code should be formatted with `gofmt -s`. -2. All code should pass the default levels of - [`golint`](https://github.com/golang/lint). -3. All code should follow the guidelines covered in [Effective - Go](http://golang.org/doc/effective_go.html) and [Go Code Review - Comments](https://github.com/golang/go/wiki/CodeReviewComments). -4. Comment the code. Tell us the why, the history and the context. -5. Document _all_ declarations and methods, even private ones. Declare - expectations, caveats and anything else that may be important. If a type - gets exported, having the comments already there will ensure it's ready. -6. 
Variable name length should be proportional to its context and no longer. - `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. - In practice, short methods will have short variable names and globals will - have longer names. -7. No underscores in package names. If you need a compound name, step back, - and re-examine why you need a compound name. If you still think you need a - compound name, lose the underscore. -8. No utils or helpers packages. If a function is not general enough to - warrant its own package, it has not been written generally enough to be a - part of a util package. Just leave it unexported and well-documented. -9. All tests should run with `go test` and outside tooling should not be - required. No, we don't need another unit testing framework. Assertion - packages are acceptable if they provide _real_ incremental value. -10. Even though we call these "rules" above, they are actually just - guidelines. Since you've read all the rules, you now know that. - -If you are having trouble getting into the mood of idiomatic Go, we recommend -reading through [Effective Go](https://golang.org/doc/effective_go.html). The -[Go Blog](https://blog.golang.org) is also a great resource. Drinking the -kool-aid is a lot easier than going thirsty. diff --git a/vendor/github.com/docker/docker/Dockerfile b/vendor/github.com/docker/docker/Dockerfile deleted file mode 100644 index 29aedd47..00000000 --- a/vendor/github.com/docker/docker/Dockerfile +++ /dev/null @@ -1,248 +0,0 @@ -# This file describes the standard way to build Docker, using docker -# -# Usage: -# -# # Use make to build a development environment image and run it in a container. -# # This is slow the first time. -# make BIND_DIR=. shell -# -# The following commands are executed inside the running container. - -# # Make a dockerd binary. -# # hack/make.sh binary -# -# # Install dockerd to /usr/local/bin -# # make install -# -# # Run unit tests -# # hack/test/unit -# -# # Run tests e.g. integration, py -# # hack/make.sh binary test-integration test-docker-py -# -# # Publish a release: -# docker run --privileged \ -# -e AWS_S3_BUCKET=baz \ -# -e AWS_ACCESS_KEY=foo \ -# -e AWS_SECRET_KEY=bar \ -# -e GPG_PASSPHRASE=gloubiboulga \ -# docker hack/release.sh -# -# Note: AppArmor used to mess with privileged mode, but this is no longer -# the case. Therefore, you don't have to disable it anymore. -# - -FROM golang:1.10.1 AS base -# FIXME(vdemeester) this is kept for other script depending on it to not fail right away -# Remove this once the other scripts uses something else to detect the version -ENV GO_VERSION 1.10.1 -# allow replacing httpredir or deb mirror -ARG APT_MIRROR=deb.debian.org -RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list - -FROM base AS criu -# Install CRIU for checkpoint/restore support -ENV CRIU_VERSION 3.6 -# Install dependancy packages specific to criu -RUN apt-get update && apt-get install -y \ - libnet-dev \ - libprotobuf-c0-dev \ - libprotobuf-dev \ - libnl-3-dev \ - libcap-dev \ - protobuf-compiler \ - protobuf-c-compiler \ - python-protobuf \ - && mkdir -p /usr/src/criu \ - && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ - && cd /usr/src/criu \ - && make \ - && make PREFIX=/opt/criu install-criu - -FROM base AS registry -# Install two versions of the registry. The first is an older version that -# only supports schema1 manifests. 
The second is a newer version that supports -# both. This allows integration-cli tests to cover push/pull with both schema1 -# and schema2 manifests. -ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd -ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -buildmode=pie -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ - && case $(dpkg --print-architecture) in \ - amd64|ppc64*|s390x) \ - (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1"); \ - GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ - go build -buildmode=pie -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ - ;; \ - esac \ - && rm -rf "$GOPATH" - - - -FROM base AS docker-py -# Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 8b246db271a85d6541dc458838627e89c683e42f -RUN git clone https://github.com/docker/docker-py.git /docker-py \ - && cd /docker-py \ - && git checkout -q $DOCKER_PY_COMMIT - - - -FROM base AS swagger -# Install go-swagger for validating swagger.yaml -ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/go-swagger/go-swagger.git "$GOPATH/src/github.com/go-swagger/go-swagger" \ - && (cd "$GOPATH/src/github.com/go-swagger/go-swagger" && git checkout -q "$GO_SWAGGER_COMMIT") \ - && go build -o /usr/local/bin/swagger github.com/go-swagger/go-swagger/cmd/swagger \ - && rm -rf "$GOPATH" - - -FROM base AS frozen-images -RUN apt-get update && apt-get install -y jq ca-certificates --no-install-recommends -# Get useful and necessary Hub images so we can "docker load" locally instead of pulling -COPY contrib/download-frozen-image-v2.sh / -RUN /download-frozen-image-v2.sh /docker-frozen-images \ - buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \ - busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0 \ - busybox:glibc@sha256:0b55a30394294ab23b9afd58fab94e61a923f5834fba7ddbae7f8e0c11ba85e6 \ - debian:jessie@sha256:287a20c5f73087ab406e6b364833e3fb7b3ae63ca0eb3486555dc27ed32c6e60 \ - hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c -# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) - -# Just a little hack so we don't have to install these deps twice, once for runc and once for dockerd -FROM base AS runtime-dev -RUN apt-get update && apt-get install -y \ - libapparmor-dev \ - libseccomp-dev - - -FROM base AS tomlv -ENV INSTALL_BINARY_NAME=tomlv -COPY hack/dockerfile/install/install.sh ./install.sh -COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ -RUN PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME - -FROM base AS vndr -ENV INSTALL_BINARY_NAME=vndr -COPY hack/dockerfile/install/install.sh ./install.sh -COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ -RUN PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME - -FROM base AS 
containerd -RUN apt-get update && apt-get install -y btrfs-tools -ENV INSTALL_BINARY_NAME=containerd -COPY hack/dockerfile/install/install.sh ./install.sh -COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ -RUN PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME - -FROM base AS proxy -ENV INSTALL_BINARY_NAME=proxy -COPY hack/dockerfile/install/install.sh ./install.sh -COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ -RUN PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME - -FROM base AS gometalinter -ENV INSTALL_BINARY_NAME=gometalinter -COPY hack/dockerfile/install/install.sh ./install.sh -COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ -RUN PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME - -FROM base AS dockercli -ENV INSTALL_BINARY_NAME=dockercli -COPY hack/dockerfile/install/install.sh ./install.sh -COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ -RUN PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME - -FROM runtime-dev AS runc -ENV INSTALL_BINARY_NAME=runc -COPY hack/dockerfile/install/install.sh ./install.sh -COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ -RUN PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME - -FROM base AS tini -RUN apt-get update && apt-get install -y cmake vim-common -COPY hack/dockerfile/install/install.sh ./install.sh -ENV INSTALL_BINARY_NAME=tini -COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ -RUN PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME - - - -# TODO: Some of this is only really needed for testing, it would be nice to split this up -FROM runtime-dev AS dev -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser -# Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH -RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc -RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker -RUN ldconfig -# This should only install packages that are specifically needed for the dev environment and nothing else -# Do you really need to add another package here? Can it be done in a different build stage? 
-RUN apt-get update && apt-get install -y \ - apparmor \ - aufs-tools \ - bash-completion \ - btrfs-tools \ - iptables \ - jq \ - libdevmapper-dev \ - libudev-dev \ - libsystemd-dev \ - binutils-mingw-w64 \ - g++-mingw-w64-x86-64 \ - net-tools \ - pigz \ - python-backports.ssl-match-hostname \ - python-dev \ - python-mock \ - python-pip \ - python-requests \ - python-setuptools \ - python-websocket \ - python-wheel \ - thin-provisioning-tools \ - vim \ - vim-common \ - xfsprogs \ - zip \ - bzip2 \ - xz-utils \ - --no-install-recommends -COPY --from=swagger /usr/local/bin/swagger* /usr/local/bin/ -COPY --from=frozen-images /docker-frozen-images /docker-frozen-images -COPY --from=gometalinter /opt/gometalinter/ /usr/local/bin/ -COPY --from=tomlv /opt/tomlv/ /usr/local/bin/ -COPY --from=vndr /opt/vndr/ /usr/local/bin/ -COPY --from=tini /opt/tini/ /usr/local/bin/ -COPY --from=runc /opt/runc/ /usr/local/bin/ -COPY --from=containerd /opt/containerd/ /usr/local/bin/ -COPY --from=proxy /opt/proxy/ /usr/local/bin/ -COPY --from=dockercli /opt/dockercli /usr/local/cli -COPY --from=registry /usr/local/bin/registry* /usr/local/bin/ -COPY --from=criu /opt/criu/ /usr/local/ -COPY --from=docker-py /docker-py /docker-py -# TODO: This is for the docker-py tests, which shouldn't really be needed for -# this image, but currently CI is expecting to run this image. This should be -# split out into a separate image, including all the `python-*` deps installed -# above. -RUN cd /docker-py \ - && pip install docker-pycreds==0.2.1 \ - && pip install yamllint==1.5.0 \ - && pip install -r test-requirements.txt - -ENV PATH=/usr/local/cli:$PATH -ENV DOCKER_BUILDTAGS apparmor seccomp selinux -# Options for hack/validate/gometalinter -ENV GOMETALINTER_OPTS="--deadline=2m" -WORKDIR /go/src/github.com/docker/docker -VOLUME /var/lib/docker -# Wrap all commands in the "docker-in-docker" script to allow nested containers -ENTRYPOINT ["hack/dind"] -# Upload docker source -COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.e2e b/vendor/github.com/docker/docker/Dockerfile.e2e deleted file mode 100644 index 78e863b1..00000000 --- a/vendor/github.com/docker/docker/Dockerfile.e2e +++ /dev/null @@ -1,74 +0,0 @@ -## Step 1: Build tests -FROM golang:1.10.1-alpine3.7 as builder - -RUN apk add --update \ - bash \ - btrfs-progs-dev \ - build-base \ - curl \ - lvm2-dev \ - jq \ - && rm -rf /var/cache/apk/* - -RUN mkdir -p /go/src/github.com/docker/docker/ -WORKDIR /go/src/github.com/docker/docker/ - -# Generate frozen images -COPY contrib/download-frozen-image-v2.sh contrib/download-frozen-image-v2.sh -RUN contrib/download-frozen-image-v2.sh /output/docker-frozen-images \ - buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \ - busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0 \ - busybox:glibc@sha256:0b55a30394294ab23b9afd58fab94e61a923f5834fba7ddbae7f8e0c11ba85e6 \ - debian:jessie@sha256:287a20c5f73087ab406e6b364833e3fb7b3ae63ca0eb3486555dc27ed32c6e60 \ - hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c - -# Install dockercli -# Please edit hack/dockerfile/install/.installer to update them. -COPY hack/dockerfile/install hack/dockerfile/install -RUN ./hack/dockerfile/install/install.sh dockercli - -# Set tag and add sources -ARG DOCKER_GITCOMMIT -ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT:-undefined} -ADD . . 
- -# Build DockerSuite.TestBuild* dependency -RUN CGO_ENABLED=0 go build -buildmode=pie -o /output/httpserver github.com/docker/docker/contrib/httpserver - -# Build the integration tests and copy the resulting binaries to /output/tests -RUN hack/make.sh build-integration-test-binary -RUN mkdir -p /output/tests && find . -name test.main -exec cp --parents '{}' /output/tests \; - -## Step 2: Generate testing image -FROM alpine:3.7 as runner - -# GNU tar is used for generating the emptyfs image -RUN apk add --update \ - bash \ - ca-certificates \ - g++ \ - git \ - iptables \ - pigz \ - tar \ - xz \ - && rm -rf /var/cache/apk/* - -# Add an unprivileged user to be used for tests which need it -RUN addgroup docker && adduser -D -G docker unprivilegeduser -s /bin/ash - -COPY contrib/httpserver/Dockerfile /tests/contrib/httpserver/Dockerfile -COPY contrib/syscall-test /tests/contrib/syscall-test -COPY integration-cli/fixtures /tests/integration-cli/fixtures - -COPY hack/test/e2e-run.sh /scripts/run.sh -COPY hack/make/.ensure-emptyfs /scripts/ensure-emptyfs.sh - -COPY --from=builder /output/docker-frozen-images /docker-frozen-images -COPY --from=builder /output/httpserver /tests/contrib/httpserver/httpserver -COPY --from=builder /output/tests /tests -COPY --from=builder /usr/local/bin/docker /usr/bin/docker - -ENV DOCKER_REMOTE_DAEMON=1 DOCKER_INTEGRATION_DAEMON_DEST=/ - -ENTRYPOINT ["/scripts/run.sh"] diff --git a/vendor/github.com/docker/docker/Dockerfile.simple b/vendor/github.com/docker/docker/Dockerfile.simple deleted file mode 100644 index 44f29f07..00000000 --- a/vendor/github.com/docker/docker/Dockerfile.simple +++ /dev/null @@ -1,61 +0,0 @@ -# docker build -t docker:simple -f Dockerfile.simple . -# docker run --rm docker:simple hack/make.sh dynbinary -# docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit -# docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration - -# This represents the bare minimum required to build and test Docker. - -FROM debian:stretch - -# allow replacing httpredir or deb mirror -ARG APT_MIRROR=deb.debian.org -RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list - -# Compile and runtime deps -# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies -# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - btrfs-tools \ - build-essential \ - curl \ - cmake \ - gcc \ - git \ - libapparmor-dev \ - libdevmapper-dev \ - libseccomp-dev \ - ca-certificates \ - e2fsprogs \ - iptables \ - pkg-config \ - pigz \ - procps \ - xfsprogs \ - xz-utils \ - \ - aufs-tools \ - vim-common \ - && rm -rf /var/lib/apt/lists/* - -# Install Go -# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines -# will need updating, to avoid errors. Ping #docker-maintainers on IRC -# with a heads-up. -ENV GO_VERSION 1.10.1 -RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \ - | tar -xzC /usr/local -ENV PATH /go/bin:/usr/local/go/bin:$PATH -ENV GOPATH /go -ENV CGO_LDFLAGS -L/lib - -# Install runc, containerd, tini and docker-proxy -# Please edit hack/dockerfile/install/.installer to update them. 
-COPY hack/dockerfile/install hack/dockerfile/install -RUN for i in runc containerd tini proxy dockercli; \ - do hack/dockerfile/install/install.sh $i; \ - done -ENV PATH=/usr/local/cli:$PATH - -ENV AUTO_GOPATH 1 -WORKDIR /usr/src/docker -COPY . /usr/src/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.windows b/vendor/github.com/docker/docker/Dockerfile.windows deleted file mode 100644 index 0b539375..00000000 --- a/vendor/github.com/docker/docker/Dockerfile.windows +++ /dev/null @@ -1,256 +0,0 @@ -# escape=` - -# ----------------------------------------------------------------------------------------- -# This file describes the standard way to build Docker in a container on Windows -# Server 2016 or Windows 10. -# -# Maintainer: @jhowardmsft -# ----------------------------------------------------------------------------------------- - - -# Prerequisites: -# -------------- -# -# 1. Windows Server 2016 or Windows 10 with all Windows updates applied. The major -# build number must be at least 14393. This can be confirmed, for example, by -# running the following from an elevated PowerShell prompt - this sample output -# is from a fully up to date machine as at mid-November 2016: -# -# >> PS C:\> $(gin).WindowsBuildLabEx -# >> 14393.447.amd64fre.rs1_release_inmarket.161102-0100 -# -# 2. Git for Windows (or another git client) must be installed. https://git-scm.com/download/win. -# -# 3. The machine must be configured to run containers. For example, by following -# the quick start guidance at https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start or -# https://github.com/docker/labs/blob/master/windows/windows-containers/Setup.md -# -# 4. If building in a Hyper-V VM: For Windows Server 2016 using Windows Server -# containers as the default option, it is recommended you have at least 1GB -# of memory assigned; For Windows 10 where Hyper-V Containers are employed, you -# should have at least 4GB of memory assigned. Note also, to run Hyper-V -# containers in a VM, it is necessary to configure the VM for nested virtualization. - -# ----------------------------------------------------------------------------------------- - - -# Usage: -# ----- -# -# The following steps should be run from an (elevated*) Windows PowerShell prompt. -# -# (*In a default installation of containers on Windows following the quick-start guidance at -# https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start, -# the docker.exe client must run elevated to be able to connect to the daemon). -# -# 1. Clone the sources from github.com: -# -# >> git clone https://github.com/docker/docker.git C:\go\src\github.com\docker\docker -# >> Cloning into 'C:\go\src\github.com\docker\docker'... -# >> remote: Counting objects: 186216, done. -# >> remote: Compressing objects: 100% (21/21), done. -# >> remote: Total 186216 (delta 5), reused 0 (delta 0), pack-reused 186195 -# >> Receiving objects: 100% (186216/186216), 104.32 MiB | 8.18 MiB/s, done. -# >> Resolving deltas: 100% (123139/123139), done. -# >> Checking connectivity... done. -# >> Checking out files: 100% (3912/3912), done. -# >> PS C:\> -# -# -# 2. Change directory to the cloned docker sources: -# -# >> cd C:\go\src\github.com\docker\docker -# -# -# 3. Build a docker image with the components required to build the docker binaries from source -# by running one of the following: -# -# >> docker build -t nativebuildimage -f Dockerfile.windows . 
-# >> docker build -t nativebuildimage -f Dockerfile.windows -m 2GB . (if using Hyper-V containers) -# -# -# 4. Build the docker executable binaries by running one of the following: -# -# >> $DOCKER_GITCOMMIT=(git rev-parse --short HEAD) -# >> docker run --name binaries -e DOCKER_GITCOMMIT=$DOCKER_GITCOMMIT nativebuildimage hack\make.ps1 -Binary -# >> docker run --name binaries -e DOCKER_GITCOMMIT=$DOCKER_GITCOMMIT -m 2GB nativebuildimage hack\make.ps1 -Binary (if using Hyper-V containers) -# -# -# 5. Copy the binaries out of the container, replacing HostPath with an appropriate destination -# folder on the host system where you want the binaries to be located. -# -# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe -# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe -# -# -# 6. (Optional) Remove the interim container holding the built executable binaries: -# -# >> docker rm binaries -# -# -# 7. (Optional) Remove the image used for the container in which the executable -# binaries are build. Tip - it may be useful to keep this image around if you need to -# build multiple times. Then you can take advantage of the builder cache to have an -# image which has all the components required to build the binaries already installed. -# -# >> docker rmi nativebuildimage -# - -# ----------------------------------------------------------------------------------------- - - -# The validation tests can only run directly on the host. This is because they calculate -# information from the git repo, but the .git directory is not passed into the image as -# it is excluded via .dockerignore. Run the following from a Windows PowerShell prompt -# (elevation is not required): (Note Go must be installed to run these tests) -# -# >> hack\make.ps1 -DCO -PkgImports -GoFormat - - -# ----------------------------------------------------------------------------------------- - - -# To run unit tests, ensure you have created the nativebuildimage above. Then run one of -# the following from an (elevated) Windows PowerShell prompt: -# -# >> docker run --rm nativebuildimage hack\make.ps1 -TestUnit -# >> docker run --rm -m 2GB nativebuildimage hack\make.ps1 -TestUnit (if using Hyper-V containers) - - -# ----------------------------------------------------------------------------------------- - - -# To run unit tests and binary build, ensure you have created the nativebuildimage above. Then -# run one of the following from an (elevated) Windows PowerShell prompt: -# -# >> docker run nativebuildimage hack\make.ps1 -All -# >> docker run -m 2GB nativebuildimage hack\make.ps1 -All (if using Hyper-V containers) - -# ----------------------------------------------------------------------------------------- - - -# Important notes: -# --------------- -# -# Don't attempt to use a bind mount to pass a local directory as the bundles target -# directory. It does not work (golang attempts for follow a mapped folder incorrectly). -# Instead, use docker cp as per the example. -# -# go.zip is not removed from the image as it is used by the Windows CI servers -# to ensure the host and image are running consistent versions of go. -# -# Nanoserver support is a work in progress. Although the image will build if the -# FROM statement is updated, it will not work when running autogen through hack\make.ps1. 
-# It is suspected that the required GCC utilities (eg gcc, windres, windmc) silently -# quit due to the use of console hooks which are not available. -# -# The docker integration tests do not currently run in a container on Windows, predominantly -# due to Windows not supporting privileged mode, so anything using a volume would fail. -# They (along with the rest of the docker CI suite) can be run using -# https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1. -# -# ----------------------------------------------------------------------------------------- - - -# The number of build steps below are explicitly minimised to improve performance. -FROM microsoft/windowsservercore - -# Use PowerShell as the default shell -SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"] - -# Environment variable notes: -# - GO_VERSION must be consistent with 'Dockerfile' used by Linux. -# - FROM_DOCKERFILE is used for detection of building within a container. -ENV GO_VERSION=1.10.1 ` - GIT_VERSION=2.11.1 ` - GOPATH=C:\go ` - FROM_DOCKERFILE=1 - -RUN ` - Function Test-Nano() { ` - $EditionId = (Get-ItemProperty -Path 'HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion' -Name 'EditionID').EditionId; ` - return (($EditionId -eq 'ServerStandardNano') -or ($EditionId -eq 'ServerDataCenterNano') -or ($EditionId -eq 'NanoServer')); ` - }` - ` - Function Download-File([string] $source, [string] $target) { ` - if (Test-Nano) { ` - $handler = New-Object System.Net.Http.HttpClientHandler; ` - $client = New-Object System.Net.Http.HttpClient($handler); ` - $client.Timeout = New-Object System.TimeSpan(0, 30, 0); ` - $cancelTokenSource = [System.Threading.CancellationTokenSource]::new(); ` - $responseMsg = $client.GetAsync([System.Uri]::new($source), $cancelTokenSource.Token); ` - $responseMsg.Wait(); ` - if (!$responseMsg.IsCanceled) { ` - $response = $responseMsg.Result; ` - if ($response.IsSuccessStatusCode) { ` - $downloadedFileStream = [System.IO.FileStream]::new($target, [System.IO.FileMode]::Create, [System.IO.FileAccess]::Write); ` - $copyStreamOp = $response.Content.CopyToAsync($downloadedFileStream); ` - $copyStreamOp.Wait(); ` - $downloadedFileStream.Close(); ` - if ($copyStreamOp.Exception -ne $null) { throw $copyStreamOp.Exception } ` - } ` - } else { ` - Throw ("Failed to download " + $source) ` - }` - } else { ` - $webClient = New-Object System.Net.WebClient; ` - $webClient.DownloadFile($source, $target); ` - } ` - } ` - ` - setx /M PATH $('C:\git\cmd;C:\git\usr\bin;'+$Env:PATH+';C:\gcc\bin;C:\go\bin'); ` - ` - Write-Host INFO: Downloading git...; ` - $location='https://www.nuget.org/api/v2/package/GitForWindows/'+$Env:GIT_VERSION; ` - Download-File $location C:\gitsetup.zip; ` - ` - Write-Host INFO: Downloading go...; ` - Download-File $('https://golang.org/dl/go'+$Env:GO_VERSION+'.windows-amd64.zip') C:\go.zip; ` - ` - Write-Host INFO: Downloading compiler 1 of 3...; ` - Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/gcc.zip C:\gcc.zip; ` - ` - Write-Host INFO: Downloading compiler 2 of 3...; ` - Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/runtime.zip C:\runtime.zip; ` - ` - Write-Host INFO: Downloading compiler 3 of 3...; ` - Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/binutils.zip C:\binutils.zip; ` - ` - Write-Host INFO: Extracting git...; ` - Expand-Archive C:\gitsetup.zip C:\git-tmp; ` - New-Item -Type 
Directory C:\git | Out-Null; ` - Move-Item C:\git-tmp\tools\* C:\git\.; ` - Remove-Item -Recurse -Force C:\git-tmp; ` - ` - Write-Host INFO: Expanding go...; ` - Expand-Archive C:\go.zip -DestinationPath C:\; ` - ` - Write-Host INFO: Expanding compiler 1 of 3...; ` - Expand-Archive C:\gcc.zip -DestinationPath C:\gcc -Force; ` - Write-Host INFO: Expanding compiler 2 of 3...; ` - Expand-Archive C:\runtime.zip -DestinationPath C:\gcc -Force; ` - Write-Host INFO: Expanding compiler 3 of 3...; ` - Expand-Archive C:\binutils.zip -DestinationPath C:\gcc -Force; ` - ` - Write-Host INFO: Removing downloaded files...; ` - Remove-Item C:\gcc.zip; ` - Remove-Item C:\runtime.zip; ` - Remove-Item C:\binutils.zip; ` - Remove-Item C:\gitsetup.zip; ` - ` - Write-Host INFO: Creating source directory...; ` - New-Item -ItemType Directory -Path C:\go\src\github.com\docker\docker | Out-Null; ` - ` - Write-Host INFO: Configuring git core.autocrlf...; ` - C:\git\cmd\git config --global core.autocrlf true; ` - ` - Write-Host INFO: Completed - -# Make PowerShell the default entrypoint -ENTRYPOINT ["powershell.exe"] - -# Set the working directory to the location of the sources -WORKDIR C:\go\src\github.com\docker\docker - -# Copy the sources into the container -COPY . . diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE index 9c8e20ab..6d8d58fb 100644 --- a/vendor/github.com/docker/docker/LICENSE +++ b/vendor/github.com/docker/docker/LICENSE @@ -176,7 +176,7 @@ END OF TERMS AND CONDITIONS - Copyright 2013-2017 Docker, Inc. + Copyright 2013-2018 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/docker/docker/MAINTAINERS b/vendor/github.com/docker/docker/MAINTAINERS deleted file mode 100644 index 8f76e710..00000000 --- a/vendor/github.com/docker/docker/MAINTAINERS +++ /dev/null @@ -1,485 +0,0 @@ -# Moby maintainers file -# -# This file describes the maintainer groups within the moby/moby project. -# More detail on Moby project governance is available in the -# project/GOVERNANCE.md file found in this repository. -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant -# parser. -# -# TODO(estesp): This file should not necessarily depend on docker/opensource -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - - [Org."Core maintainers"] - - # The Core maintainers are the ghostbusters of the project: when there's a problem others - # can't solve, they show up and fix it with bizarre devices and weaponry. - # They have final say on technical implementation and coding style. - # They are ultimately responsible for quality in all its forms: usability polish, - # bugfixes, performance, stability, etc. When ownership can cleanly be passed to - # a subsystem, they are responsible for doing so and holding the - # subsystem maintainers accountable. If ownership is unclear, they are the de facto owners. - - people = [ - "aaronlehmann", - "akihirosuda", - "anusha", - "coolljt0725", - "cpuguy83", - "crosbymichael", - "dnephin", - "duglin", - "estesp", - "jhowardmsft", - "johnstep", - "justincormack", - "mhbauer", - "mlaventure", - "runcom", - "stevvooe", - "tianon", - "tibor", - "tonistiigi", - "unclejack", - "vdemeester", - "vieux", - "yongtang" - ] - - [Org."Docs maintainers"] - - # TODO Describe the docs maintainers role. 
- - people = [ - "misty", - "thajeztah" - ] - - [Org.Curators] - - # The curators help ensure that incoming issues and pull requests are properly triaged and - # that our various contribution and reviewing processes are respected. With their knowledge of - # the repository activity, they can also guide contributors to relevant material or - # discussions. - # - # They are neither code nor docs reviewers, so they are never expected to merge. They can - # however: - # - close an issue or pull request when it's an exact duplicate - # - close an issue or pull request when it's inappropriate or off-topic - - people = [ - "alexellis", - "andrewhsu", - "anonymuse", - "chanwit", - "fntlnz", - "gianarb", - "programmerq", - "rheinwein", - "ripcurld", - "thajeztah" - ] - - [Org.Alumni] - - # This list contains maintainers that are no longer active on the project. - # It is thanks to these people that the project has become what it is today. - # Thank you! - - people = [ - # Harald Albers is the mastermind behind the bash completion scripts for the - # Docker CLI. The completion scripts moved to the Docker CLI repository, so - # you can now find him perform his magic in the https://github.com/docker/cli repository. - "albers", - - # Andrea Luzzardi started contributing to the Docker codebase in the "dotCloud" - # era, even before it was called "Docker". He is one of the architects of both - # Swarm and SwarmKit, and its integration into the Docker engine. - "aluzzardi", - - # David Calavera contributed many features to Docker, such as an improved - # event system, dynamic configuration reloading, volume plugins, fancy - # new templating options, and an external client credential store. As a - # maintainer, David was release captain for Docker 1.8, and competing - # with Jess Frazelle to be "top dream killer". - # David is now doing amazing stuff as CTO for https://www.netlify.com, - # and tweets as @calavera. - "calavera", - - # As a maintainer, Erik was responsible for the "builder", and - # started the first designs for the new networking model in - # Docker. Erik is now working on all kinds of plugins for Docker - # (https://github.com/contiv) and various open source projects - # in his own repository https://github.com/erikh. You may - # still stumble into him in our issue tracker, or on IRC. - "erikh", - - # Evan Hazlett is the creator of of the Shipyard and Interlock open source projects, - # and the author of "Orca", which became the foundation of Docker Universal Control - # Plane (UCP). As a maintainer, Evan helped integrating SwarmKit (secrets, tasks) - # into the Docker engine. - "ehazlett", - - # Arnaud Porterie (AKA "icecrime") was in charge of maintaining the maintainers. - # As a maintainer, he made life easier for contributors to the Docker open-source - # projects, bringing order in the chaos by designing a triage- and review workflow - # using labels (see https://icecrime.net/technology/a-structured-approach-to-labeling/), - # and automating the hell out of things with his buddies GordonTheTurtle and Poule - # (a chicken!). - # - # A lesser-known fact is that he created the first commit in the libnetwork repository - # even though he didn't know anything about it. Some say, he's now selling stuff on - # the internet ;-) - "icecrime", - - # After a false start with his first PR being rejected, James Turnbull became a frequent - # contributor to the documentation, and became a docs maintainer on December 5, 2013. 
As - # a maintainer, James lifted the docs to a higher standard, and introduced the community - # guidelines ("three strikes"). James is currently changing the world as CTO of https://www.empatico.org, - # meanwhile authoring various books that are worth checking out. You can find him on Twitter, - # rambling as @kartar, and although no longer active as a maintainer, he's always "game" to - # help out reviewing docs PRs, so you may still see him around in the repository. - "jamtur01", - - # Jessica Frazelle, also known as the "Keyser Söze of containers", - # runs *everything* in containers. She started contributing to - # Docker with a (fun fun) change involving both iptables and regular - # expressions (coz, YOLO!) on July 10, 2014 - # https://github.com/docker/docker/pull/6950/commits/f3a68ffa390fb851115c77783fa4031f1d3b2995. - # Jess was Release Captain for Docker 1.4, 1.6 and 1.7, and contributed - # many features and improvement, among which "seccomp profiles" (making - # containers a lot more secure). Besides being a maintainer, she - # set up the CI infrastructure for the project, giving everyone - # something to shout at if a PR failed ("noooo Janky!"). - # Be sure you don't miss her talks at a conference near you (a must-see), - # read her blog at https://blog.jessfraz.com (a must-read), and - # check out her open source projects on GitHub https://github.com/jessfraz (a must-try). - "jessfraz", - - # Alexander Morozov contributed many features to Docker, worked on the premise of - # what later became containerd (and worked on that too), and made a "stupid" Go - # vendor tool specificaly for docker/docker needs: vndr (https://github.com/LK4D4/vndr). - # Not many know that Alexander is a master negotiator, being able to change course - # of action with a single "Nope, we're not gonna do that". - "lk4d4", - - # Madhu Venugopal was part of the SocketPlane team that joined Docker. - # As a maintainer, he was working with Jana for the Container Network - # Model (CNM) implemented through libnetwork, and the "routing mesh" powering - # Swarm mode networking. - "mavenugo", - - # As a docs maintainer, Mary Anthony contributed greatly to the Docker - # docs. She wrote the Docker Contributor Guide and Getting Started - # Guides. She helped create a doc build system independent of - # docker/docker project, and implemented a new docs.docker.com theme and - # nav for 2015 Dockercon. Fun fact: the most inherited layer in DockerHub - # public repositories was originally referenced in - # maryatdocker/docker-whale back in May 2015. - "moxiegirl", - - # Jana Radhakrishnan was part of the SocketPlane team that joined Docker. - # As a maintainer, he was the lead architect for the Container Network - # Model (CNM) implemented through libnetwork, and the "routing mesh" powering - # Swarm mode networking. - # - # Jana started new adventures in networking, but you can find him tweeting as @mrjana, - # coding on GitHub https://github.com/mrjana, and he may be hiding on the Docker Community - # slack channel :-) - "mrjana", - - # Sven Dowideit became a well known person in the Docker ecosphere, building - # boot2docker, and became a regular contributor to the project, starting as - # early as October 2013 (https://github.com/docker/docker/pull/2119), to become - # a maintainer less than two months later (https://github.com/docker/docker/pull/3061). 
- # - # As a maintainer, Sven took on the task to convert the documentation from - # ReStructuredText to Markdown, migrate to Hugo for generating the docs, and - # writing tooling for building, testing, and publishing them. - # - # If you're not in the occasion to visit "the Australian office", you - # can keep up with Sven on Twitter (@SvenDowideit), his blog http://fosiki.com, - # and of course on GitHub. - "sven", - - # Vincent "vbatts!" Batts made his first contribution to the project - # in November 2013, to become a maintainer a few months later, on - # May 10, 2014 (https://github.com/docker/docker/commit/d6e666a87a01a5634c250358a94c814bf26cb778). - # As a maintainer, Vincent made important contributions to core elements - # of Docker, such as "distribution" (tarsum) and graphdrivers (btrfs, devicemapper). - # He also contributed the "tar-split" library, an important element - # for the content-addressable store. - # Vincent is currently a member of the Open Containers Initiative - # Technical Oversight Board (TOB), besides his work at Red Hat and - # Project Atomic. You can still find him regularly hanging out in - # our repository and the #docker-dev and #docker-maintainers IRC channels - # for a chat, as he's always a lot of fun. - "vbatts", - - # Vishnu became a maintainer to help out on the daemon codebase and - # libcontainer integration. He's currently involved in the - # Open Containers Initiative, working on the specifications, - # besides his work on cAdvisor and Kubernetes for Google. - "vishh" - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. - - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.aaronlehmann] - Name = "Aaron Lehmann" - Email = "aaron.lehmann@docker.com" - GitHub = "aaronlehmann" - - [people.alexellis] - Name = "Alex Ellis" - Email = "alexellis2@gmail.com" - GitHub = "alexellis" - - [people.akihirosuda] - Name = "Akihiro Suda" - Email = "suda.akihiro@lab.ntt.co.jp" - GitHub = "AkihiroSuda" - - [people.aluzzardi] - Name = "Andrea Luzzardi" - Email = "al@docker.com" - GitHub = "aluzzardi" - - [people.albers] - Name = "Harald Albers" - Email = "github@albersweb.de" - GitHub = "albers" - - [people.andrewhsu] - Name = "Andrew Hsu" - Email = "andrewhsu@docker.com" - GitHub = "andrewhsu" - - [people.anonymuse] - Name = "Jesse White" - Email = "anonymuse@gmail.com" - GitHub = "anonymuse" - - [people.anusha] - Name = "Anusha Ragunathan" - Email = "anusha@docker.com" - GitHub = "anusha-ragunathan" - - [people.calavera] - Name = "David Calavera" - Email = "david.calavera@gmail.com" - GitHub = "calavera" - - [people.coolljt0725] - Name = "Lei Jitang" - Email = "leijitang@huawei.com" - GitHub = "coolljt0725" - - [people.cpuguy83] - Name = "Brian Goff" - Email = "cpuguy83@gmail.com" - GitHub = "cpuguy83" - - [people.chanwit] - Name = "Chanwit Kaewkasi" - Email = "chanwit@gmail.com" - GitHub = "chanwit" - - [people.crosbymichael] - Name = "Michael Crosby" - Email = "crosbymichael@gmail.com" - GitHub = "crosbymichael" - - [people.dnephin] - Name = "Daniel Nephin" - Email = "dnephin@gmail.com" - GitHub = "dnephin" - - [people.duglin] - Name = "Doug Davis" - Email = "dug@us.ibm.com" - GitHub = "duglin" - - [people.ehazlett] - Name = "Evan Hazlett" - Email = "ejhazlett@gmail.com" - GitHub = "ehazlett" - - [people.erikh] - Name = "Erik Hollensbe" - Email = "erik@docker.com" - GitHub = "erikh" - - [people.estesp] - Name = "Phil Estes" - Email = 
"estesp@linux.vnet.ibm.com" - GitHub = "estesp" - - [people.fntlnz] - Name = "Lorenzo Fontana" - Email = "fontanalorenz@gmail.com" - GitHub = "fntlnz" - - [people.gianarb] - Name = "Gianluca Arbezzano" - Email = "ga@thumpflow.com" - GitHub = "gianarb" - - [people.icecrime] - Name = "Arnaud Porterie" - Email = "icecrime@gmail.com" - GitHub = "icecrime" - - [people.jamtur01] - Name = "James Turnbull" - Email = "james@lovedthanlost.net" - GitHub = "jamtur01" - - [people.jhowardmsft] - Name = "John Howard" - Email = "jhoward@microsoft.com" - GitHub = "jhowardmsft" - - [people.jessfraz] - Name = "Jessie Frazelle" - Email = "jess@linux.com" - GitHub = "jessfraz" - - [people.johnstep] - Name = "John Stephens" - Email = "johnstep@docker.com" - GitHub = "johnstep" - - [people.justincormack] - Name = "Justin Cormack" - Email = "justin.cormack@docker.com" - GitHub = "justincormack" - - [people.lk4d4] - Name = "Alexander Morozov" - Email = "lk4d4@docker.com" - GitHub = "lk4d4" - - [people.mavenugo] - Name = "Madhu Venugopal" - Email = "madhu@docker.com" - GitHub = "mavenugo" - - [people.mhbauer] - Name = "Morgan Bauer" - Email = "mbauer@us.ibm.com" - GitHub = "mhbauer" - - [people.misty] - Name = "Misty Stanley-Jones" - Email = "misty@docker.com" - GitHub = "mistyhacks" - - [people.mlaventure] - Name = "Kenfe-Mickaël Laventure" - Email = "mickael.laventure@gmail.com" - GitHub = "mlaventure" - - [people.moxiegirl] - Name = "Mary Anthony" - Email = "mary.anthony@docker.com" - GitHub = "moxiegirl" - - [people.mrjana] - Name = "Jana Radhakrishnan" - Email = "mrjana@docker.com" - GitHub = "mrjana" - - [people.programmerq] - Name = "Jeff Anderson" - Email = "jeff@docker.com" - GitHub = "programmerq" - - [people.rheinwein] - Name = "Laura Frank" - Email = "laura@codeship.com" - GitHub = "rheinwein" - - [people.ripcurld] - Name = "Boaz Shuster" - Email = "ripcurld.github@gmail.com" - GitHub = "ripcurld" - - [people.runcom] - Name = "Antonio Murdaca" - Email = "runcom@redhat.com" - GitHub = "runcom" - - [people.shykes] - Name = "Solomon Hykes" - Email = "solomon@docker.com" - GitHub = "shykes" - - [people.stevvooe] - Name = "Stephen Day" - Email = "stephen.day@docker.com" - GitHub = "stevvooe" - - [people.sven] - Name = "Sven Dowideit" - Email = "SvenDowideit@home.org.au" - GitHub = "SvenDowideit" - - [people.thajeztah] - Name = "Sebastiaan van Stijn" - Email = "github@gone.nl" - GitHub = "thaJeztah" - - [people.tianon] - Name = "Tianon Gravi" - Email = "admwiggin@gmail.com" - GitHub = "tianon" - - [people.tibor] - Name = "Tibor Vass" - Email = "tibor@docker.com" - GitHub = "tiborvass" - - [people.tonistiigi] - Name = "Tõnis Tiigi" - Email = "tonis@docker.com" - GitHub = "tonistiigi" - - [people.unclejack] - Name = "Cristian Staretu" - Email = "cristian.staretu@gmail.com" - GitHub = "unclejack" - - [people.vbatts] - Name = "Vincent Batts" - Email = "vbatts@redhat.com" - GitHub = "vbatts" - - [people.vdemeester] - Name = "Vincent Demeester" - Email = "vincent@sbr.pm" - GitHub = "vdemeester" - - [people.vieux] - Name = "Victor Vieux" - Email = "vieux@docker.com" - GitHub = "vieux" - - [people.vishh] - Name = "Vishnu Kannan" - Email = "vishnuk@google.com" - GitHub = "vishh" - - [people.yongtang] - Name = "Yong Tang" - Email = "yong.tang.github@outlook.com" - GitHub = "yongtang" diff --git a/vendor/github.com/docker/docker/Makefile b/vendor/github.com/docker/docker/Makefile deleted file mode 100644 index 3c19caa8..00000000 --- a/vendor/github.com/docker/docker/Makefile +++ /dev/null @@ -1,212 +0,0 @@ -.PHONY: all 
binary dynbinary build cross deb help init-go-pkg-cache install manpages rpm run shell test test-docker-py test-integration test-unit validate win - -# set the graph driver as the current graphdriver if not set -DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //')) -export DOCKER_GRAPHDRIVER -DOCKER_INCREMENTAL_BINARY := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_INCREMENTAL_BINARY),1) -export DOCKER_INCREMENTAL_BINARY - -# get OS/Arch of docker engine -DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH}') -DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}') - -DOCKER_GITCOMMIT := $(shell git rev-parse --short HEAD || echo unsupported) -export DOCKER_GITCOMMIT - -# env vars passed through directly to Docker's build scripts -# to allow things like `make KEEPBUNDLE=1 binary` easily -# `project/PACKAGERS.md` have some limited documentation of some of these -# -# DOCKER_LDFLAGS can be used to pass additional parameters to -ldflags -# option of "go build". For example, a built-in graphdriver priority list -# can be changed during build time like this: -# -# make DOCKER_LDFLAGS="-X github.com/docker/docker/daemon/graphdriver.priority=overlay2,devicemapper" dynbinary -# -DOCKER_ENVS := \ - -e DOCKER_CROSSPLATFORMS \ - -e BUILD_APT_MIRROR \ - -e BUILDFLAGS \ - -e KEEPBUNDLE \ - -e DOCKER_BUILD_ARGS \ - -e DOCKER_BUILD_GOGC \ - -e DOCKER_BUILD_PKGS \ - -e DOCKER_BASH_COMPLETION_PATH \ - -e DOCKER_CLI_PATH \ - -e DOCKER_DEBUG \ - -e DOCKER_EXPERIMENTAL \ - -e DOCKER_GITCOMMIT \ - -e DOCKER_GRAPHDRIVER \ - -e DOCKER_INCREMENTAL_BINARY \ - -e DOCKER_LDFLAGS \ - -e DOCKER_PORT \ - -e DOCKER_REMAP_ROOT \ - -e DOCKER_STORAGE_OPTS \ - -e DOCKER_USERLANDPROXY \ - -e TEST_INTEGRATION_DIR \ - -e TESTDIRS \ - -e TESTFLAGS \ - -e TIMEOUT \ - -e HTTP_PROXY \ - -e HTTPS_PROXY \ - -e NO_PROXY \ - -e http_proxy \ - -e https_proxy \ - -e no_proxy \ - -e VERSION \ - -e PLATFORM -# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds - -# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test` -# (default to no bind mount if DOCKER_HOST is set) -# note: BINDDIR is supported for backwards-compatibility here -BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles)) -DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)") - -# This allows the test suite to be able to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs. -# The volume will be cleaned up when the container is removed due to `--rm`. -# Note that `BIND_DIR` will already be set to `bundles` if `DOCKER_HOST` is not set (see above BIND_DIR line), in such case this will do nothing since `DOCKER_MOUNT` will already be set. 
-DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docker/docker/bundles) -v "$(CURDIR)/.git:/go/src/github.com/docker/docker/.git" - -# This allows to set the docker-dev container name -DOCKER_CONTAINER_NAME := $(if $(CONTAINER_NAME),--name $(CONTAINER_NAME),) - -# enable package cache if DOCKER_INCREMENTAL_BINARY and DOCKER_MOUNT (i.e.DOCKER_HOST) are set -PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64:/usr/local/go/pkg/linux_amd64 goroot-linux_amd64_netgo:/usr/local/go/pkg/linux_amd64_netgo -PKGCACHE_VOLROOT := dockerdev-go-pkg-cache -PKGCACHE_VOL := $(if $(PKGCACHE_DIR),$(CURDIR)/$(PKGCACHE_DIR)/,$(PKGCACHE_VOLROOT)-) -DOCKER_MOUNT_PKGCACHE := $(if $(DOCKER_INCREMENTAL_BINARY),$(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(PKGCACHE_VOL)\1"@g'),) -DOCKER_MOUNT_CLI := $(if $(DOCKER_CLI_PATH),-v $(shell dirname $(DOCKER_CLI_PATH)):/usr/local/cli,) -DOCKER_MOUNT_BASH_COMPLETION := $(if $(DOCKER_BASH_COMPLETION_PATH),-v $(shell dirname $(DOCKER_BASH_COMPLETION_PATH)):/usr/local/completion/bash,) -DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_MOUNT_CLI) $(DOCKER_MOUNT_BASH_COMPLETION) - -GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) -GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g") -DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN)) -DOCKER_PORT_FORWARD := $(if $(DOCKER_PORT),-p "$(DOCKER_PORT)",) - -DOCKER_FLAGS := docker run --rm -i --privileged $(DOCKER_CONTAINER_NAME) $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD) -BUILD_APT_MIRROR := $(if $(DOCKER_BUILD_APT_MIRROR),--build-arg APT_MIRROR=$(DOCKER_BUILD_APT_MIRROR)) -export BUILD_APT_MIRROR - -SWAGGER_DOCS_PORT ?= 9000 - -INTEGRATION_CLI_MASTER_IMAGE := $(if $(INTEGRATION_CLI_MASTER_IMAGE), $(INTEGRATION_CLI_MASTER_IMAGE), integration-cli-master) -INTEGRATION_CLI_WORKER_IMAGE := $(if $(INTEGRATION_CLI_WORKER_IMAGE), $(INTEGRATION_CLI_WORKER_IMAGE), integration-cli-worker) - -define \n - - -endef - -# if this session isn't interactive, then we don't want to allocate a -# TTY, which would fail, but if it is interactive, we do want to attach -# so that the user can send e.g. ^C through. -INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0) -ifeq ($(INTERACTIVE), 1) - DOCKER_FLAGS += -t -endif - -DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)" - -default: binary - -all: build ## validate all checks, build linux binaries, run all tests\ncross build non-linux binaries and generate archives - $(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh' - -binary: build ## build the linux binaries - $(DOCKER_RUN_DOCKER) hack/make.sh binary - -dynbinary: build ## build the linux dynbinaries - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary - -build: bundles init-go-pkg-cache - $(warning The docker client CLI has moved to github.com/docker/cli. For a dev-test cycle involving the CLI, run:${\n} DOCKER_CLI_PATH=/host/path/to/cli/binary make shell ${\n} then change the cli and compile into a binary at the same location.${\n}) - docker build ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" . 
- -bundles: - mkdir bundles - -clean: clean-pkg-cache-vol ## clean up cached resources - -clean-pkg-cache-vol: - @- $(foreach mapping,$(PKGCACHE_MAP), \ - $(shell docker volume rm $(PKGCACHE_VOLROOT)-$(shell echo $(mapping) | awk -F':/' '{ print $$1 }') > /dev/null 2>&1) \ - ) - -cross: build ## cross build the binaries for darwin, freebsd and\nwindows - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross - -deb: build ## build the deb packages - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-deb - - -help: ## this help - @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) - -init-go-pkg-cache: - $(if $(PKGCACHE_DIR), mkdir -p $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^: ]*):[^ ]*@$(PKGCACHE_DIR)/\1@g')) - -install: ## install the linux binaries - KEEPBUNDLE=1 hack/make.sh install-binary - -rpm: build ## build the rpm packages - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-rpm - -run: build ## run the docker daemon in a container - $(DOCKER_RUN_DOCKER) sh -c "KEEPBUNDLE=1 hack/make.sh install-binary run" - -shell: build ## start a shell inside the build env - $(DOCKER_RUN_DOCKER) bash - -test: build test-unit ## run the unit, integration and docker-py tests - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary cross test-integration test-docker-py - -test-docker-py: build ## run the docker-py tests - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-docker-py - -test-integration-cli: test-integration ## (DEPRECATED) use test-integration - -test-integration: build ## run the integration tests - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration - -test-unit: build ## run the unit tests - $(DOCKER_RUN_DOCKER) hack/test/unit - -validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor - $(DOCKER_RUN_DOCKER) hack/validate/all - -win: build ## cross build the binary for windows - $(DOCKER_RUN_DOCKER) hack/make.sh win - -.PHONY: swagger-gen -swagger-gen: - docker run --rm -v $(PWD):/go/src/github.com/docker/docker \ - -w /go/src/github.com/docker/docker \ - --entrypoint hack/generate-swagger-api.sh \ - -e GOPATH=/go \ - quay.io/goswagger/swagger:0.7.4 - -.PHONY: swagger-docs -swagger-docs: ## preview the API documentation - @echo "API docs preview will be running at http://localhost:$(SWAGGER_DOCS_PORT)" - @docker run --rm -v $(PWD)/api/swagger.yaml:/usr/share/nginx/html/swagger.yaml \ - -e 'REDOC_OPTIONS=hide-hostname="true" lazy-rendering' \ - -p $(SWAGGER_DOCS_PORT):80 \ - bfirsh/redoc:1.6.2 - -build-integration-cli-on-swarm: build ## build images and binary for running integration-cli on Swarm in parallel - @echo "Building hack/integration-cli-on-swarm (if build fails, please refer to hack/integration-cli-on-swarm/README.md)" - go build -buildmode=pie -o ./hack/integration-cli-on-swarm/integration-cli-on-swarm ./hack/integration-cli-on-swarm/host - @echo "Building $(INTEGRATION_CLI_MASTER_IMAGE)" - docker build -t $(INTEGRATION_CLI_MASTER_IMAGE) hack/integration-cli-on-swarm/agent -# For worker, we don't use `docker build` so as to enable DOCKER_INCREMENTAL_BINARY and so on - @echo "Building $(INTEGRATION_CLI_WORKER_IMAGE) from $(DOCKER_IMAGE)" - $(eval tmp := integration-cli-worker-tmp) -# We mount pkgcache, but not bundle (bundle needs to be baked into the image) -# For avoiding bakings DOCKER_GRAPHDRIVER and so on to image, we cannot use $(DOCKER_ENVS) here - docker run -t -d --name $(tmp) -e 
DOCKER_GITCOMMIT -e BUILDFLAGS -e DOCKER_INCREMENTAL_BINARY --privileged $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_IMAGE) top - docker exec $(tmp) hack/make.sh build-integration-test-binary dynbinary - docker exec $(tmp) go build -buildmode=pie -o /worker github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker - docker commit -c 'ENTRYPOINT ["/worker"]' $(tmp) $(INTEGRATION_CLI_WORKER_IMAGE) - docker rm -f $(tmp) diff --git a/vendor/github.com/docker/docker/README.md b/vendor/github.com/docker/docker/README.md deleted file mode 100644 index 534fd97d..00000000 --- a/vendor/github.com/docker/docker/README.md +++ /dev/null @@ -1,57 +0,0 @@ -The Moby Project -================ - -![Moby Project logo](docs/static_files/moby-project-logo.png "The Moby Project") - -Moby is an open-source project created by Docker to enable and accelerate software containerization. - -It provides a "Lego set" of toolkit components, the framework for assembling them into custom container-based systems, and a place for all container enthusiasts and professionals to experiment and exchange ideas. -Components include container build tools, a container registry, orchestration tools, a runtime and more, and these can be used as building blocks in conjunction with other tools and projects. - -## Principles - -Moby is an open project guided by strong principles, aiming to be modular, flexible and without too strong an opinion on user experience. -It is open to the community to help set its direction. - -- Modular: the project includes lots of components that have well-defined functions and APIs that work together. -- Batteries included but swappable: Moby includes enough components to build fully featured container system, but its modular architecture ensures that most of the components can be swapped by different implementations. -- Usable security: Moby provides secure defaults without compromising usability. -- Developer focused: The APIs are intended to be functional and useful to build powerful tools. -They are not necessarily intended as end user tools but as components aimed at developers. -Documentation and UX is aimed at developers not end users. - -## Audience - -The Moby Project is intended for engineers, integrators and enthusiasts looking to modify, hack, fix, experiment, invent and build systems based on containers. -It is not for people looking for a commercially supported system, but for people who want to work and learn with open source code. - -## Relationship with Docker - -The components and tools in the Moby Project are initially the open source components that Docker and the community have built for the Docker Project. -New projects can be added if they fit with the community goals. Docker is committed to using Moby as the upstream for the Docker Product. -However, other projects are also encouraged to use Moby as an upstream, and to reuse the components in diverse ways, and all these uses will be treated in the same way. External maintainers and contributors are welcomed. - -The Moby project is not intended as a location for support or feature requests for Docker products, but as a place for contributors to work on open source code, fix bugs, and make the code more useful. -The releases are supported by the maintainers, community and users, on a best efforts basis only, and are not intended for customers who want enterprise or commercial support; Docker EE is the appropriate product for these use cases. - ------ - -Legal -===== - -*Brought to you courtesy of our legal counsel. 
For more context, -please see the [NOTICE](https://github.com/moby/moby/blob/master/NOTICE) document in this repo.* - -Use and transfer of Moby may be subject to certain restrictions by the -United States and other governments. - -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -Licensing -========= -Moby is licensed under the Apache License, Version 2.0. See -[LICENSE](https://github.com/moby/moby/blob/master/LICENSE) for the full -license text. diff --git a/vendor/github.com/docker/docker/ROADMAP.md b/vendor/github.com/docker/docker/ROADMAP.md deleted file mode 100644 index e2e6b2b9..00000000 --- a/vendor/github.com/docker/docker/ROADMAP.md +++ /dev/null @@ -1,68 +0,0 @@ -Moby Project Roadmap -==================== - -### How should I use this document? - -This document provides description of items that the project decided to prioritize. This should -serve as a reference point for Moby contributors to understand where the project is going, and -help determine if a contribution could be conflicting with some longer term plans. - -The fact that a feature isn't listed here doesn't mean that a patch for it will automatically be -refused! We are always happy to receive patches for new cool features we haven't thought about, -or didn't judge to be a priority. Please however understand that such patches might take longer -for us to review. - -### How can I help? - -Short term objectives are listed in -[Issues](https://github.com/moby/moby/issues?q=is%3Aopen+is%3Aissue+label%3Aroadmap). Our -goal is to split down the workload in such way that anybody can jump in and help. Please comment on -issues if you want to work on it to avoid duplicating effort! Similarly, if a maintainer is already -assigned on an issue you'd like to participate in, pinging him on GitHub to offer your help is -the best way to go. - -### How can I add something to the roadmap? - -The roadmap process is new to the Moby Project: we are only beginning to structure and document the -project objectives. Our immediate goal is to be more transparent, and work with our community to -focus our efforts on fewer prioritized topics. - -We hope to offer in the near future a process allowing anyone to propose a topic to the roadmap, but -we are not quite there yet. For the time being, it is best to discuss with the maintainers on an -issue, in the Slack channel, or in person at the Moby Summits that happen every few months. - -# 1. Features and refactoring - -## 1.1 Runtime improvements - -We introduced [`runC`](https://runc.io) as a standalone low-level tool for container -execution in 2015, the first stage in spinning out parts of the Engine into standalone tools. - -As runC continued evolving, and the OCI specification along with it, we created -[`containerd`](https://github.com/containerd/containerd), a daemon to control and monitor `runC`. -In late 2016 this was relaunched as the `containerd` 1.0 track, aiming to provide a common runtime -for the whole spectrum of container systems, including Kubernetes, with wide community support. -This change meant that there was an increased scope for `containerd`, including image management -and storage drivers. - -Moby will rely on a long-running `containerd` companion daemon for all container execution -related operations. This could open the door in the future for Engine restarts without interrupting -running containers. 
The switch over to containerd 1.0 is an important goal for the project, and -will result in a significant simplification of the functions implemented in this repository. - -## 1.2 Internal decoupling - -A lot of work has been done in trying to decouple Moby internals. This process of creating -standalone projects with a well defined function that attract a dedicated community should continue. -As well as integrating `containerd` we would like to integrate [BuildKit](https://github.com/moby/buildkit) -as the next standalone component. - -We see gRPC as the natural communication layer between decoupled components. - -## 1.3 Custom assembly tooling - -We have been prototyping the Moby [assembly tool](https://github.com/moby/tool) which was originally -developed for LinuxKit and intend to turn it into a more generic packaging and assembly mechanism -that can build not only the default version of Moby, as distribution packages or other useful forms, -but can also build very different container systems, themselves built of cooperating daemons built in -and running in containers. We intend to merge this functionality into this repo. diff --git a/vendor/github.com/docker/docker/TESTING.md b/vendor/github.com/docker/docker/TESTING.md deleted file mode 100644 index 1231e1c5..00000000 --- a/vendor/github.com/docker/docker/TESTING.md +++ /dev/null @@ -1,71 +0,0 @@ -# Testing - -This document contains the Moby code testing guidelines. It should answer any -questions you may have as an aspiring Moby contributor. - -## Test suites - -Moby has two test suites (and one legacy test suite): - -* Unit tests - use standard `go test` and - [gotestyourself/assert](https://godoc.org/github.com/gotestyourself/gotestyourself/assert) assertions. They are located in - the package they test. Unit tests should be fast and test only their own - package. -* API integration tests - use standard `go test` and - [gotestyourself/assert](https://godoc.org/github.com/gotestyourself/gotestyourself/assert) assertions. They are located in - `./integration/<component>/` directories, where `component` is: container, - image, volume, etc. These tests perform HTTP requests to an API endpoint and - check the HTTP response and daemon state after the call. - -The legacy test suite `integration-cli/` is deprecated. No new tests will be -added to this suite. Any tests in this suite which require updates should be -ported to either the unit test suite or the new API integration test suite. - -## Writing new tests - -Most code changes will fall into one of the following categories. - -### Writing tests for new features - -New code should be covered by unit tests. If the code is difficult to test with -a unit test then that is a good sign that it should be refactored to make it -easier to reuse and maintain. Consider accepting unexported interfaces instead -of structs so that fakes can be provided for dependencies. - -If the new feature includes a completely new API endpoint then a new API -integration test should be added to cover the success case of that endpoint. - -If the new feature does not include a completely new API endpoint consider -adding the new API fields to the existing test for that endpoint. A new -integration test should **not** be added for every new API field or API error -case. Error cases should be handled by unit tests. - -### Writing tests for bug fixes - -Bug fixes should include a unit test case which exercises the bug. - -A bug fix may also include new assertions in an existing integration test for the -API endpoint.
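For illustration only (this block is not part of TESTING.md): a minimal sketch of a unit test in the style described above, using standard `go test` with the gotestyourself/assert helpers. The `example` package and the behaviour under test are hypothetical.

```go
package example

import (
	"strconv"
	"testing"

	"github.com/gotestyourself/gotestyourself/assert"
)

// TestAtoi stands in for a focused unit test: it lives next to the code it
// exercises, runs fast, and uses the assert helpers referenced above.
func TestAtoi(t *testing.T) {
	n, err := strconv.Atoi("42")
	assert.NilError(t, err)
	assert.Equal(t, n, 42)
}
```

Run it with `go test` in the package directory, or via `make test-unit` as described in the next section.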
- -## Running tests - -To run the unit test suite: - -``` -make test-unit -``` - -or `hack/test/unit` from inside a `BINDDIR=. make shell` container or properly -configured environment. - -The following environment variables may be used to run a subset of tests: - -* `TESTDIRS` - paths to directories to be tested, defaults to `./...` -* `TESTFLAGS` - flags passed to `go test`, to run tests which match a pattern - use `TESTFLAGS="-test.run TestNameOrPrefix"` - -To run the integration test suite: - -``` -make test-integration -``` diff --git a/vendor/github.com/docker/docker/VENDORING.md b/vendor/github.com/docker/docker/VENDORING.md deleted file mode 100644 index 8884f885..00000000 --- a/vendor/github.com/docker/docker/VENDORING.md +++ /dev/null @@ -1,46 +0,0 @@ -# Vendoring policies - -This document outlines recommended Vendoring policies for Docker repositories. -(Example, libnetwork is a Docker repo and logrus is not.) - -## Vendoring using tags - -Commit ID based vendoring provides little/no information about the updates -vendored. To fix this, vendors will now require that repositories use annotated -tags along with commit ids to snapshot commits. Annotated tags by themselves -are not sufficient, since the same tag can be force updated to reference -different commits. - -Each tag should: -- Follow Semantic Versioning rules (refer to section on "Semantic Versioning") -- Have a corresponding entry in the change tracking document. - -Each repo should: -- Have a change tracking document between tags/releases. Ex: CHANGELOG.md, -github releases file. - -The goal here is for consuming repos to be able to use the tag version and -changelog updates to determine whether the vendoring will cause any breaking or -backward incompatible changes. This also means that repos can specify having -dependency on a package of a specific version or greater up to the next major -release, without encountering breaking changes. - -## Semantic Versioning -Annotated version tags should follow [Semantic Versioning](http://semver.org) policies: - -"Given a version number MAJOR.MINOR.PATCH, increment the: - - 1. MAJOR version when you make incompatible API changes, - 2. MINOR version when you add functionality in a backwards-compatible manner, and - 3. PATCH version when you make backwards-compatible bug fixes. - -Additional labels for pre-release and build metadata are available as extensions -to the MAJOR.MINOR.PATCH format." - -## Vendoring cadence -In order to avoid huge vendoring changes, it is recommended to have a regular -cadence for vendoring updates. e.g. monthly. - -## Pre-merge vendoring tests -All related repos will be vendored into docker/docker. -CI on docker/docker should catch any breaking changes involving multiple repos. 
diff --git a/vendor/github.com/docker/docker/api/BUILD.bazel b/vendor/github.com/docker/docker/api/BUILD.bazel deleted file mode 100644 index a23279db..00000000 --- a/vendor/github.com/docker/docker/api/BUILD.bazel +++ /dev/null @@ -1,45 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "common.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "common_unix.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "common_unix.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "common_unix.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "common_unix.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "common_unix.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "common_unix.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "common_unix.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "common_unix.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "common_unix.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "common_unix.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "common_windows.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/docker/docker/api", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md deleted file mode 100644 index f136c343..00000000 --- a/vendor/github.com/docker/docker/api/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# Working on the Engine API - -The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon. - -It consists of various components in this repository: - -- `api/swagger.yaml` A Swagger definition of the API. -- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this. -- `cli/` The command-line client. -- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs. -- `daemon/` The daemon, which serves the API. - -## Swagger definition - -The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to: - -1. Automatically generate documentation. -2. Automatically generate the Go server and client. (A work-in-progress.) -3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc. - -## Updating the API documentation - -The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation. - -The file is split into two main sections: - -- `definitions`, which defines re-usable objects used in requests and responses -- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable) - -To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section. - -There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. 
adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://github.com/docker/docker/issues/27919). - -`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing. - -## Viewing the API documentation - -When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly. - -Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation. - -The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io). diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index beb251a9..4582eb92 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -3,9 +3,9 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( // DefaultVersion of Current REST API - DefaultVersion string = "1.37" + DefaultVersion = "1.39" // NoBaseImageSpecifier is the symbol used by the FROM // command to specify that no base image is to be used. - NoBaseImageSpecifier string = "scratch" + NoBaseImageSpecifier = "scratch" ) diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go index af1a5416..504b0c90 100644 --- a/vendor/github.com/docker/docker/api/common_unix.go +++ b/vendor/github.com/docker/docker/api/common_unix.go @@ -3,4 +3,4 @@ package api // import "github.com/docker/docker/api" // MinVersion represents Minimum REST API version supported -const MinVersion string = "1.12" +const MinVersion = "1.12" diff --git a/vendor/github.com/docker/docker/api/swagger-gen.yaml b/vendor/github.com/docker/docker/api/swagger-gen.yaml deleted file mode 100644 index f07a0273..00000000 --- a/vendor/github.com/docker/docker/api/swagger-gen.yaml +++ /dev/null @@ -1,12 +0,0 @@ - -layout: - models: - - name: definition - source: asset:model - target: "{{ joinFilePath .Target .ModelPackage }}" - file_name: "{{ (snakize (pascalize .Name)) }}.go" - operations: - - name: handler - source: asset:serverOperation - target: "{{ joinFilePath .Target .APIPackage .Package }}" - file_name: "{{ (snakize (pascalize .Name)) }}.go" diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml deleted file mode 100644 index db83986b..00000000 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ /dev/null @@ -1,10075 +0,0 @@ -# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. -# -# This is used for generating API documentation and the types used by the -# client/server. See api/README.md for more information. -# -# Some style notes: -# - This file is used by ReDoc, which allows GitHub Flavored Markdown in -# descriptions. -# - There is no maximum line length, for ease of editing and pretty diffs. -# - operationIds are in the format "NounVerb", with a singular noun. 
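As context for the `client/` package described above and the `DefaultVersion` bump in the api/common.go hunk, here is a hedged sketch of calling the Engine API from Go. It assumes the `NewClientWithOpts`, `FromEnv`, and `WithVersion` options are available in the docker/docker revision vendored by this change; it is illustrative, not code from this repository.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	// Pin the API version explicitly instead of relying on the client's
	// compiled-in default (api.DefaultVersion, bumped to "1.39" above).
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion("1.39"))
	if err != nil {
		log.Fatal(err)
	}

	// GET /v1.39/containers/json, the same endpoint `docker ps` uses.
	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{All: true})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID[:12], c.Image, c.Status)
	}
}
```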
- -swagger: "2.0" -schemes: - - "http" - - "https" -produces: - - "application/json" - - "text/plain" -consumes: - - "application/json" - - "text/plain" -basePath: "/v1.37" -info: - title: "Docker Engine API" - version: "1.37" - x-logo: - url: "https://docs.docker.com/images/logo-docker-main.png" - description: | - The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. - - Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. - - # Errors - - The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: - - ``` - { - "message": "page not found" - } - ``` - - # Versioning - - The API is usually changed in each release, so API calls are versioned to - ensure that clients don't break. To lock to a specific version of the API, - you prefix the URL with its version, for example, call `/v1.30/info` to use - the v1.30 version of the `/info` endpoint. If the API version specified in - the URL is not supported by the daemon, a HTTP `400 Bad Request` error message - is returned. - - If you omit the version-prefix, the current version of the API (v1.37) is used. - For example, calling `/info` is the same as calling `/v1.36/info`. Using the - API without a version-prefix is deprecated and will be removed in a future release. - - Engine releases in the near future should support this version of the API, - so your client will continue to work even if it is talking to a newer Engine. - - The API uses an open schema model, which means server may add extra properties - to responses. Likewise, the server will ignore any extra query parameters and - request body properties. When you write clients, you need to ignore additional - properties in responses to ensure they do not break when talking to newer - daemons. - - - # Authentication - - Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure: - - ``` - { - "username": "string", - "password": "string", - "email": "string", - "serveraddress": "string" - } - ``` - - The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. - - If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: - - ``` - { - "identitytoken": "9cbaf023786cd7..." - } - ``` - -# The tags on paths define the menu sections in the ReDoc documentation, so -# the usage of tags must make sense for that: -# - They should be singular, not plural. -# - There should not be too many tags, or the menu becomes unwieldy. For -# example, it is preferable to add a path to the "System" tag instead of -# creating a tag with a single path in it. -# - The order of tags in this list defines the order in the menu. -tags: - # Primary objects - - name: "Container" - x-displayName: "Containers" - description: | - Create and manage containers. 
- - name: "Image" - x-displayName: "Images" - - name: "Network" - x-displayName: "Networks" - description: | - Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/engine/userguide/networking/) for more information. - - name: "Volume" - x-displayName: "Volumes" - description: | - Create and manage persistent storage that can be attached to containers. - - name: "Exec" - x-displayName: "Exec" - description: | - Run new commands inside running containers. See the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information. - - To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`. - # Swarm things - - name: "Swarm" - x-displayName: "Swarm" - description: | - Engines can be clustered together in a swarm. See [the swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. - - name: "Node" - x-displayName: "Nodes" - description: | - Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. - - name: "Service" - x-displayName: "Services" - description: | - Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. - - name: "Task" - x-displayName: "Tasks" - description: | - A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. - - name: "Secret" - x-displayName: "Secrets" - description: | - Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. - - name: "Config" - x-displayName: "Configs" - description: | - Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work. - # System things - - name: "Plugin" - x-displayName: "Plugins" - - name: "System" - x-displayName: "System" - -definitions: - Port: - type: "object" - description: "An open port on a container" - required: [PrivatePort, Type] - properties: - IP: - type: "string" - format: "ip-address" - PrivatePort: - type: "integer" - format: "uint16" - x-nullable: false - description: "Port on the container" - PublicPort: - type: "integer" - format: "uint16" - description: "Port exposed on the host" - Type: - type: "string" - x-nullable: false - enum: ["tcp", "udp", "sctp"] - example: - PrivatePort: 8080 - PublicPort: 80 - Type: "tcp" - - MountPoint: - type: "object" - description: "A mount point inside a container" - properties: - Type: - type: "string" - Name: - type: "string" - Source: - type: "string" - Destination: - type: "string" - Driver: - type: "string" - Mode: - type: "string" - RW: - type: "boolean" - Propagation: - type: "string" - - DeviceMapping: - type: "object" - description: "A device mapping between the host and container" - properties: - PathOnHost: - type: "string" - PathInContainer: - type: "string" - CgroupPermissions: - type: "string" - example: - PathOnHost: "/dev/deviceName" - PathInContainer: "/dev/deviceName" - CgroupPermissions: "mrw" - - ThrottleDevice: - type: "object" - properties: - Path: - description: "Device path" - type: "string" - Rate: - description: "Rate" - type: "integer" - format: "int64" - minimum: 0 - - Mount: - type: "object" - properties: - Target: - description: "Container path." 
- type: "string" - Source: - description: "Mount source (e.g. a volume name, a host path)." - type: "string" - Type: - description: | - The mount type. Available types: - - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - ReadOnly: - description: "Whether the mount should be read-only." - type: "boolean" - Consistency: - description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." - type: "string" - BindOptions: - description: "Optional configuration for the `bind` type." - type: "object" - properties: - Propagation: - description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." - type: "string" - enum: - - "private" - - "rprivate" - - "shared" - - "rshared" - - "slave" - - "rslave" - VolumeOptions: - description: "Optional configuration for the `volume` type." - type: "object" - properties: - NoCopy: - description: "Populate volume with data from the target." - type: "boolean" - default: false - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - DriverConfig: - description: "Map of driver specific options" - type: "object" - properties: - Name: - description: "Name of the driver to use to create the volume." - type: "string" - Options: - description: "key/value map of driver specific options." - type: "object" - additionalProperties: - type: "string" - TmpfsOptions: - description: "Optional configuration for the `tmpfs` type." - type: "object" - properties: - SizeBytes: - description: "The size for the tmpfs mount in bytes." - type: "integer" - format: "int64" - Mode: - description: "The permission mode for the tmpfs mount in an integer." - type: "integer" - - RestartPolicy: - description: | - The behavior to apply when the container exits. The default is not to restart. - - An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. - type: "object" - properties: - Name: - type: "string" - description: | - - Empty string means not to restart - - `always` Always restart - - `unless-stopped` Restart always except when the user has manually stopped the container - - `on-failure` Restart only when the container exit code is non-zero - enum: - - "" - - "always" - - "unless-stopped" - - "on-failure" - MaximumRetryCount: - type: "integer" - description: "If `on-failure` is used, the number of times to retry before giving up" - - Resources: - description: "A container's resources (cgroups config, ulimits, etc)" - type: "object" - properties: - # Applicable to all platforms - CpuShares: - description: "An integer value representing this container's relative CPU weight versus other containers." - type: "integer" - Memory: - description: "Memory limit in bytes." - type: "integer" - format: "int64" - default: 0 - # Applicable to UNIX platforms - CgroupParent: - description: "Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. 
Cgroups are created if they do not already exist." - type: "string" - BlkioWeight: - description: "Block IO weight (relative weight)." - type: "integer" - minimum: 0 - maximum: 1000 - BlkioWeightDevice: - description: | - Block IO weight (relative device weight) in the form `[{"Path": "device_path", "Weight": weight}]`. - type: "array" - items: - type: "object" - properties: - Path: - type: "string" - Weight: - type: "integer" - minimum: 0 - BlkioDeviceReadBps: - description: | - Limit read rate (bytes per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceWriteBps: - description: | - Limit write rate (bytes per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceReadIOps: - description: | - Limit read rate (IO per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceWriteIOps: - description: | - Limit write rate (IO per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - CpuPeriod: - description: "The length of a CPU period in microseconds." - type: "integer" - format: "int64" - CpuQuota: - description: "Microseconds of CPU time that the container can get in a CPU period." - type: "integer" - format: "int64" - CpuRealtimePeriod: - description: "The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks." - type: "integer" - format: "int64" - CpuRealtimeRuntime: - description: "The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks." - type: "integer" - format: "int64" - CpusetCpus: - description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)" - type: "string" - example: "0-3" - CpusetMems: - description: "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems." - type: "string" - Devices: - description: "A list of devices to add to the container." - type: "array" - items: - $ref: "#/definitions/DeviceMapping" - DeviceCgroupRules: - description: "a list of cgroup rules to apply to the container" - type: "array" - items: - type: "string" - example: "c 13:* rwm" - DiskQuota: - description: "Disk limit (in bytes)." - type: "integer" - format: "int64" - KernelMemory: - description: "Kernel memory limit in bytes." - type: "integer" - format: "int64" - MemoryReservation: - description: "Memory soft limit in bytes." - type: "integer" - format: "int64" - MemorySwap: - description: "Total memory limit (memory + swap). Set as `-1` to enable unlimited swap." - type: "integer" - format: "int64" - MemorySwappiness: - description: "Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100." - type: "integer" - format: "int64" - minimum: 0 - maximum: 100 - NanoCPUs: - description: "CPU quota in units of 10-9 CPUs." - type: "integer" - format: "int64" - OomKillDisable: - description: "Disable OOM Killer for the container." - type: "boolean" - Init: - description: "Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used." - type: "boolean" - x-nullable: true - PidsLimit: - description: "Tune a container's pids limit. 
Set -1 for unlimited." - type: "integer" - format: "int64" - Ulimits: - description: | - A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" - type: "array" - items: - type: "object" - properties: - Name: - description: "Name of ulimit" - type: "string" - Soft: - description: "Soft limit" - type: "integer" - Hard: - description: "Hard limit" - type: "integer" - # Applicable to Windows - CpuCount: - description: | - The number of usable CPUs (Windows only). - - On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. - type: "integer" - format: "int64" - CpuPercent: - description: | - The usable percentage of the available CPUs (Windows only). - - On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. - type: "integer" - format: "int64" - IOMaximumIOps: - description: "Maximum IOps for the container system drive (Windows only)" - type: "integer" - format: "int64" - IOMaximumBandwidth: - description: "Maximum IO in bytes per second for the container system drive (Windows only)" - type: "integer" - format: "int64" - - ResourceObject: - description: "An object describing the resources which can be advertised by a node and requested by a task" - type: "object" - properties: - NanoCPUs: - type: "integer" - format: "int64" - example: 4000000000 - MemoryBytes: - type: "integer" - format: "int64" - example: 8272408576 - GenericResources: - $ref: "#/definitions/GenericResources" - - GenericResources: - description: "User-defined resources can be either Integer resources (e.g, `SSD=3`) or String resources (e.g, `GPU=UUID1`)" - type: "array" - items: - type: "object" - properties: - NamedResourceSpec: - type: "object" - properties: - Kind: - type: "string" - Value: - type: "string" - DiscreteResourceSpec: - type: "object" - properties: - Kind: - type: "string" - Value: - type: "integer" - format: "int64" - example: - - DiscreteResourceSpec: - Kind: "SSD" - Value: 3 - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID1" - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID2" - - HealthConfig: - description: "A test to perform to check that the container is healthy." - type: "object" - properties: - Test: - description: | - The test to perform. Possible values are: - - - `[]` inherit healthcheck from image or parent image - - `["NONE"]` disable healthcheck - - `["CMD", args...]` exec arguments directly - - `["CMD-SHELL", command]` run command with system's default shell - type: "array" - items: - type: "string" - Interval: - description: "The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit." - type: "integer" - Timeout: - description: "The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit." - type: "integer" - Retries: - description: "The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit." - type: "integer" - StartPeriod: - description: "Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit." 
- type: "integer" - - HostConfig: - description: "Container configuration that depends on the host we are running on" - allOf: - - $ref: "#/definitions/Resources" - - type: "object" - properties: - # Applicable to all platforms - Binds: - type: "array" - description: | - A list of volume bindings for this container. Each volume binding is a string in one of these forms: - - - `host-src:container-dest` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. - - `host-src:container-dest:ro` to make the bind mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path. - - `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. - - `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path. - items: - type: "string" - ContainerIDFile: - type: "string" - description: "Path to a file where the container ID is written" - LogConfig: - type: "object" - description: "The logging configuration for this container" - properties: - Type: - type: "string" - enum: - - "json-file" - - "syslog" - - "journald" - - "gelf" - - "fluentd" - - "awslogs" - - "splunk" - - "etwlogs" - - "none" - Config: - type: "object" - additionalProperties: - type: "string" - NetworkMode: - type: "string" - description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken - as a custom network's name to which this container should connect to." - PortBindings: - $ref: "#/definitions/PortMap" - RestartPolicy: - $ref: "#/definitions/RestartPolicy" - AutoRemove: - type: "boolean" - description: "Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set." - VolumeDriver: - type: "string" - description: "Driver that this container uses to mount volumes." - VolumesFrom: - type: "array" - description: "A list of volumes to inherit from another container, specified in the form `[:]`." - items: - type: "string" - Mounts: - description: "Specification for mounts to be added to the container." - type: "array" - items: - $ref: "#/definitions/Mount" - - # Applicable to UNIX platforms - CapAdd: - type: "array" - description: "A list of kernel capabilities to add to the container." - items: - type: "string" - CapDrop: - type: "array" - description: "A list of kernel capabilities to drop from the container." - items: - type: "string" - Dns: - type: "array" - description: "A list of DNS servers for the container to use." - items: - type: "string" - DnsOptions: - type: "array" - description: "A list of DNS options." - items: - type: "string" - DnsSearch: - type: "array" - description: "A list of DNS search domains." - items: - type: "string" - ExtraHosts: - type: "array" - description: | - A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. - items: - type: "string" - GroupAdd: - type: "array" - description: "A list of additional groups that the container process will run as." - items: - type: "string" - IpcMode: - type: "string" - description: | - IPC sharing mode for the container. 
Possible values are: - - - `"none"`: own private IPC namespace, with /dev/shm not mounted - - `"private"`: own private IPC namespace - - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers - - `"container:"`: join another (shareable) container's IPC namespace - - `"host"`: use the host system's IPC namespace - - If not specified, daemon default is used, which can either be `"private"` - or `"shareable"`, depending on daemon version and configuration. - Cgroup: - type: "string" - description: "Cgroup to use for the container." - Links: - type: "array" - description: "A list of links for the container in the form `container_name:alias`." - items: - type: "string" - OomScoreAdj: - type: "integer" - description: "An integer value containing the score given to the container in order to tune OOM killer preferences." - example: 500 - PidMode: - type: "string" - description: | - Set the PID (Process) Namespace mode for the container. It can be either: - - - `"container:"`: joins another container's PID namespace - - `"host"`: use the host's PID namespace inside the container - Privileged: - type: "boolean" - description: "Gives the container full access to the host." - PublishAllPorts: - type: "boolean" - description: | - Allocates an ephemeral host port for all of a container's - exposed ports. - - Ports are de-allocated when the container stops and allocated when the container starts. - The allocated port might be changed when restarting the container. - - The port is selected from the ephemeral port range that depends on the kernel. - For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`. - ReadonlyRootfs: - type: "boolean" - description: "Mount the container's root filesystem as read only." - SecurityOpt: - type: "array" - description: "A list of string values to customize labels for MLS - systems, such as SELinux." - items: - type: "string" - StorageOpt: - type: "object" - description: | - Storage driver options for this container, in the form `{"size": "120G"}`. - additionalProperties: - type: "string" - Tmpfs: - type: "object" - description: | - A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: `{ "/run": "rw,noexec,nosuid,size=65536k" }`. - additionalProperties: - type: "string" - UTSMode: - type: "string" - description: "UTS namespace to use for the container." - UsernsMode: - type: "string" - description: "Sets the usernamespace mode for the container when usernamespace remapping option is enabled." - ShmSize: - type: "integer" - description: "Size of `/dev/shm` in bytes. If omitted, the system uses 64MB." - minimum: 0 - Sysctls: - type: "object" - description: | - A list of kernel parameters (sysctls) to set in the container. For example: `{"net.ipv4.ip_forward": "1"}` - additionalProperties: - type: "string" - Runtime: - type: "string" - description: "Runtime to use with this container." - # Applicable to Windows - ConsoleSize: - type: "array" - description: "Initial console size, as an `[height, width]` array. (Windows only)" - minItems: 2 - maxItems: 2 - items: - type: "integer" - minimum: 0 - Isolation: - type: "string" - description: "Isolation technology of the container. 
(Windows only)" - enum: - - "default" - - "process" - - "hyperv" - - ContainerConfig: - description: "Configuration for a container that is portable between hosts" - type: "object" - properties: - Hostname: - description: "The hostname to use for the container, as a valid RFC 1123 hostname." - type: "string" - Domainname: - description: "The domain name to use for the container." - type: "string" - User: - description: "The user that commands are run as inside the container." - type: "string" - AttachStdin: - description: "Whether to attach to `stdin`." - type: "boolean" - default: false - AttachStdout: - description: "Whether to attach to `stdout`." - type: "boolean" - default: true - AttachStderr: - description: "Whether to attach to `stderr`." - type: "boolean" - default: true - ExposedPorts: - description: | - An object mapping ports to an empty object in the form: - - `{"/": {}}` - type: "object" - additionalProperties: - type: "object" - enum: - - {} - default: {} - Tty: - description: "Attach standard streams to a TTY, including `stdin` if it is not closed." - type: "boolean" - default: false - OpenStdin: - description: "Open `stdin`" - type: "boolean" - default: false - StdinOnce: - description: "Close `stdin` after one attached client disconnects" - type: "boolean" - default: false - Env: - description: | - A list of environment variables to set inside the container in the form `["VAR=value", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value. - type: "array" - items: - type: "string" - Cmd: - description: "Command to run specified as a string or an array of strings." - type: "array" - items: - type: "string" - Healthcheck: - $ref: "#/definitions/HealthConfig" - ArgsEscaped: - description: "Command is already escaped (Windows only)" - type: "boolean" - Image: - description: "The name of the image to use when creating the container" - type: "string" - Volumes: - description: "An object mapping mount point paths inside the container to empty objects." - type: "object" - additionalProperties: - type: "object" - enum: - - {} - default: {} - WorkingDir: - description: "The working directory for commands to run in." - type: "string" - Entrypoint: - description: | - The entry point for the container as a string or an array of strings. - - If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). - type: "array" - items: - type: "string" - NetworkDisabled: - description: "Disable networking for the container." - type: "boolean" - MacAddress: - description: "MAC address of the container." - type: "string" - OnBuild: - description: "`ONBUILD` metadata that were defined in the image's `Dockerfile`." - type: "array" - items: - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - StopSignal: - description: "Signal to stop a container as a string or unsigned integer." - type: "string" - default: "SIGTERM" - StopTimeout: - description: "Timeout to stop a container in seconds." - type: "integer" - default: 10 - Shell: - description: "Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell." 
- type: "array" - items: - type: "string" - - NetworkSettings: - description: "NetworkSettings exposes the network settings in the API" - type: "object" - properties: - Bridge: - description: Name of the network'a bridge (for example, `docker0`). - type: "string" - example: "docker0" - SandboxID: - description: SandboxID uniquely represents a container's network stack. - type: "string" - example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" - HairpinMode: - description: | - Indicates if hairpin NAT should be enabled on the virtual interface. - type: "boolean" - example: false - LinkLocalIPv6Address: - description: IPv6 unicast address using the link-local prefix. - type: "string" - example: "fe80::42:acff:fe11:1" - LinkLocalIPv6PrefixLen: - description: Prefix length of the IPv6 unicast address. - type: "integer" - example: "64" - Ports: - $ref: "#/definitions/PortMap" - SandboxKey: - description: SandboxKey identifies the sandbox - type: "string" - example: "/var/run/docker/netns/8ab54b426c38" - - # TODO is SecondaryIPAddresses actually used? - SecondaryIPAddresses: - description: "" - type: "array" - items: - $ref: "#/definitions/Address" - x-nullable: true - - # TODO is SecondaryIPv6Addresses actually used? - SecondaryIPv6Addresses: - description: "" - type: "array" - items: - $ref: "#/definitions/Address" - x-nullable: true - - # TODO properties below are part of DefaultNetworkSettings, which is - # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 - EndpointID: - description: | - EndpointID uniquely represents a service endpoint in a Sandbox. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" - Gateway: - description: | - Gateway address for the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "172.17.0.1" - GlobalIPv6Address: - description: | - Global IPv6 address for the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "2001:db8::5689" - GlobalIPv6PrefixLen: - description: | - Mask length of the global IPv6 address. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "integer" - example: 64 - IPAddress: - description: | - IPv4 address for the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "172.17.0.4" - IPPrefixLen: - description: | - Mask length of the IPv4 address. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "integer" - example: 16 - IPv6Gateway: - description: | - IPv6 gateway address for this network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "2001:db8:2::100" - MacAddress: - description: | - MAC address for the container on the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "02:42:ac:11:00:04" - Networks: - description: | - Information about all networks that the container is connected to. - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - - Address: - description: Address represents an IPv4 or IPv6 IP address. - type: "object" - properties: - Addr: - description: IP address. - type: "string" - PrefixLen: - description: Mask length of the IP address. - type: "integer" - - PortMap: - description: | - PortMap describes the mapping of container ports to host ports, using the - container's port-number and protocol as key in the format `/`, - for example, `80/udp`. - - If a container's port is mapped for multiple protocols, separate entries - are added to the mapping table. - type: "object" - additionalProperties: - type: "array" - items: - $ref: "#/definitions/PortBinding" - example: - "443/tcp": - - HostIp: "127.0.0.1" - HostPort: "4443" - "80/tcp": - - HostIp: "0.0.0.0" - HostPort: "80" - - HostIp: "0.0.0.0" - HostPort: "8080" - "80/udp": - - HostIp: "0.0.0.0" - HostPort: "80" - "53/udp": - - HostIp: "0.0.0.0" - HostPort: "53" - "2377/tcp": null - - PortBinding: - description: | - PortBinding represents a binding between a host IP address and a host - port. - type: "object" - x-nullable: true - properties: - HostIp: - description: "Host IP address that the container's port is mapped to." - type: "string" - example: "127.0.0.1" - HostPort: - description: "Host port number that the container's port is mapped to." - type: "string" - example: "4443" - - GraphDriverData: - description: "Information about a container's graph driver." 
- type: "object" - required: [Name, Data] - properties: - Name: - type: "string" - x-nullable: false - Data: - type: "object" - x-nullable: false - additionalProperties: - type: "string" - - Image: - type: "object" - required: - - Id - - Parent - - Comment - - Created - - Container - - DockerVersion - - Author - - Architecture - - Os - - Size - - VirtualSize - - GraphDriver - - RootFS - properties: - Id: - type: "string" - x-nullable: false - RepoTags: - type: "array" - items: - type: "string" - RepoDigests: - type: "array" - items: - type: "string" - Parent: - type: "string" - x-nullable: false - Comment: - type: "string" - x-nullable: false - Created: - type: "string" - x-nullable: false - Container: - type: "string" - x-nullable: false - ContainerConfig: - $ref: "#/definitions/ContainerConfig" - DockerVersion: - type: "string" - x-nullable: false - Author: - type: "string" - x-nullable: false - Config: - $ref: "#/definitions/ContainerConfig" - Architecture: - type: "string" - x-nullable: false - Os: - type: "string" - x-nullable: false - OsVersion: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - VirtualSize: - type: "integer" - format: "int64" - x-nullable: false - GraphDriver: - $ref: "#/definitions/GraphDriverData" - RootFS: - type: "object" - required: [Type] - properties: - Type: - type: "string" - x-nullable: false - Layers: - type: "array" - items: - type: "string" - BaseLayer: - type: "string" - Metadata: - type: "object" - properties: - LastTagTime: - type: "string" - format: "dateTime" - - ImageSummary: - type: "object" - required: - - Id - - ParentId - - RepoTags - - RepoDigests - - Created - - Size - - SharedSize - - VirtualSize - - Labels - - Containers - properties: - Id: - type: "string" - x-nullable: false - ParentId: - type: "string" - x-nullable: false - RepoTags: - type: "array" - x-nullable: false - items: - type: "string" - RepoDigests: - type: "array" - x-nullable: false - items: - type: "string" - Created: - type: "integer" - x-nullable: false - Size: - type: "integer" - x-nullable: false - SharedSize: - type: "integer" - x-nullable: false - VirtualSize: - type: "integer" - x-nullable: false - Labels: - type: "object" - x-nullable: false - additionalProperties: - type: "string" - Containers: - x-nullable: false - type: "integer" - - AuthConfig: - type: "object" - properties: - username: - type: "string" - password: - type: "string" - email: - type: "string" - serveraddress: - type: "string" - example: - username: "hannibal" - password: "xxxx" - serveraddress: "https://index.docker.io/v1/" - - ProcessConfig: - type: "object" - properties: - privileged: - type: "boolean" - user: - type: "string" - tty: - type: "boolean" - entrypoint: - type: "string" - arguments: - type: "array" - items: - type: "string" - - Volume: - type: "object" - required: [Name, Driver, Mountpoint, Labels, Scope, Options] - properties: - Name: - type: "string" - description: "Name of the volume." - x-nullable: false - Driver: - type: "string" - description: "Name of the volume driver used by the volume." - x-nullable: false - Mountpoint: - type: "string" - description: "Mount path of the volume on the host." - x-nullable: false - CreatedAt: - type: "string" - format: "dateTime" - description: "Date/Time the volume was created." - Status: - type: "object" - description: | - Low-level details about the volume, provided by the volume driver. - Details are returned as a map with key/value pairs: - `{"key":"value","key2":"value2"}`. 
- - The `Status` field is optional, and is omitted if the volume driver - does not support this feature. - additionalProperties: - type: "object" - Labels: - type: "object" - description: "User-defined key/value metadata." - x-nullable: false - additionalProperties: - type: "string" - Scope: - type: "string" - description: "The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level." - default: "local" - x-nullable: false - enum: ["local", "global"] - Options: - type: "object" - description: "The driver specific options used when creating the volume." - additionalProperties: - type: "string" - UsageData: - type: "object" - x-nullable: true - required: [Size, RefCount] - description: | - Usage details about the volume. This information is used by the - `GET /system/df` endpoint, and omitted in other endpoints. - properties: - Size: - type: "integer" - default: -1 - description: | - Amount of disk space used by the volume (in bytes). This information - is only available for volumes created with the `"local"` volume - driver. For volumes created with other volume drivers, this field - is set to `-1` ("not available") - x-nullable: false - RefCount: - type: "integer" - default: -1 - description: | - The number of containers referencing this volume. This field - is set to `-1` if the reference-count is not available. - x-nullable: false - - example: - Name: "tardis" - Driver: "custom" - Mountpoint: "/var/lib/docker/volumes/tardis" - Status: - hello: "world" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Scope: "local" - CreatedAt: "2016-06-07T20:31:11.853781916Z" - - Network: - type: "object" - properties: - Name: - type: "string" - Id: - type: "string" - Created: - type: "string" - format: "dateTime" - Scope: - type: "string" - Driver: - type: "string" - EnableIPv6: - type: "boolean" - IPAM: - $ref: "#/definitions/IPAM" - Internal: - type: "boolean" - Attachable: - type: "boolean" - Ingress: - type: "boolean" - Containers: - type: "object" - additionalProperties: - $ref: "#/definitions/NetworkContainer" - Options: - type: "object" - additionalProperties: - type: "string" - Labels: - type: "object" - additionalProperties: - type: "string" - example: - Name: "net01" - Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" - Created: "2016-10-19T04:33:30.360899459Z" - Scope: "local" - Driver: "bridge" - EnableIPv6: false - IPAM: - Driver: "default" - Config: - - Subnet: "172.19.0.0/16" - Gateway: "172.19.0.1" - Options: - foo: "bar" - Internal: false - Attachable: false - Ingress: false - Containers: - 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: - Name: "test" - EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" - MacAddress: "02:42:ac:13:00:02" - IPv4Address: "172.19.0.2/16" - IPv6Address: "" - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - IPAM: - type: "object" - properties: - Driver: - description: "Name of the IPAM driver to use." 
- type: "string" - default: "default" - Config: - description: "List of IPAM configuration options, specified as a map: `{\"Subnet\": , \"IPRange\": , \"Gateway\": , \"AuxAddress\": }`" - type: "array" - items: - type: "object" - additionalProperties: - type: "string" - Options: - description: "Driver-specific options, specified as a map." - type: "array" - items: - type: "object" - additionalProperties: - type: "string" - - NetworkContainer: - type: "object" - properties: - Name: - type: "string" - EndpointID: - type: "string" - MacAddress: - type: "string" - IPv4Address: - type: "string" - IPv6Address: - type: "string" - - BuildInfo: - type: "object" - properties: - id: - type: "string" - stream: - type: "string" - error: - type: "string" - errorDetail: - $ref: "#/definitions/ErrorDetail" - status: - type: "string" - progress: - type: "string" - progressDetail: - $ref: "#/definitions/ProgressDetail" - aux: - $ref: "#/definitions/ImageID" - - ImageID: - type: "object" - description: "Image ID or Digest" - properties: - ID: - type: "string" - example: - ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" - - CreateImageInfo: - type: "object" - properties: - id: - type: "string" - error: - type: "string" - status: - type: "string" - progress: - type: "string" - progressDetail: - $ref: "#/definitions/ProgressDetail" - - PushImageInfo: - type: "object" - properties: - error: - type: "string" - status: - type: "string" - progress: - type: "string" - progressDetail: - $ref: "#/definitions/ProgressDetail" - - ErrorDetail: - type: "object" - properties: - code: - type: "integer" - message: - type: "string" - - ProgressDetail: - type: "object" - properties: - current: - type: "integer" - total: - type: "integer" - - ErrorResponse: - description: "Represents an error." - type: "object" - required: ["message"] - properties: - message: - description: "The error message." - type: "string" - x-nullable: false - example: - message: "Something went wrong." - - IdResponse: - description: "Response to an API call that returns just an Id" - type: "object" - required: ["Id"] - properties: - Id: - description: "The id of the newly created object." - type: "string" - x-nullable: false - - EndpointSettings: - description: "Configuration for a network endpoint." - type: "object" - properties: - # Configurations - IPAMConfig: - $ref: "#/definitions/EndpointIPAMConfig" - Links: - type: "array" - items: - type: "string" - example: - - "container_1" - - "container_2" - Aliases: - type: "array" - items: - type: "string" - example: - - "server_x" - - "server_y" - - # Operational data - NetworkID: - description: | - Unique ID of the network. - type: "string" - example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" - EndpointID: - description: | - Unique ID for the service endpoint in a Sandbox. - type: "string" - example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" - Gateway: - description: | - Gateway address for this network. - type: "string" - example: "172.17.0.1" - IPAddress: - description: | - IPv4 address. - type: "string" - example: "172.17.0.4" - IPPrefixLen: - description: | - Mask length of the IPv4 address. - type: "integer" - example: 16 - IPv6Gateway: - description: | - IPv6 gateway address. - type: "string" - example: "2001:db8:2::100" - GlobalIPv6Address: - description: | - Global IPv6 address. - type: "string" - example: "2001:db8::5689" - GlobalIPv6PrefixLen: - description: | - Mask length of the global IPv6 address. 
- type: "integer" - format: "int64" - example: 64 - MacAddress: - description: | - MAC address for the endpoint on this network. - type: "string" - example: "02:42:ac:11:00:04" - DriverOpts: - description: | - DriverOpts is a mapping of driver options and values. These options - are passed directly to the driver and are driver specific. - type: "object" - x-nullable: true - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - - EndpointIPAMConfig: - description: | - EndpointIPAMConfig represents an endpoint's IPAM configuration. - type: "object" - x-nullable: true - properties: - IPv4Address: - type: "string" - example: "172.20.30.33" - IPv6Address: - type: "string" - example: "2001:db8:abcd::3033" - LinkLocalIPs: - type: "array" - items: - type: "string" - example: - - "169.254.34.68" - - "fe80::3468" - - PluginMount: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Source, Destination, Type, Options] - properties: - Name: - type: "string" - x-nullable: false - example: "some-mount" - Description: - type: "string" - x-nullable: false - example: "This is a mount that's used by the plugin." - Settable: - type: "array" - items: - type: "string" - Source: - type: "string" - example: "/var/lib/docker/plugins/" - Destination: - type: "string" - x-nullable: false - example: "/mnt/state" - Type: - type: "string" - x-nullable: false - example: "bind" - Options: - type: "array" - items: - type: "string" - example: - - "rbind" - - "rw" - - PluginDevice: - type: "object" - required: [Name, Description, Settable, Path] - x-nullable: false - properties: - Name: - type: "string" - x-nullable: false - Description: - type: "string" - x-nullable: false - Settable: - type: "array" - items: - type: "string" - Path: - type: "string" - example: "/dev/fuse" - - PluginEnv: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Value] - properties: - Name: - x-nullable: false - type: "string" - Description: - x-nullable: false - type: "string" - Settable: - type: "array" - items: - type: "string" - Value: - type: "string" - - PluginInterfaceType: - type: "object" - x-nullable: false - required: [Prefix, Capability, Version] - properties: - Prefix: - type: "string" - x-nullable: false - Capability: - type: "string" - x-nullable: false - Version: - type: "string" - x-nullable: false - - Plugin: - description: "A plugin for the Engine API" - type: "object" - required: [Settings, Enabled, Config, Name] - properties: - Id: - type: "string" - example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" - Name: - type: "string" - x-nullable: false - example: "tiborvass/sample-volume-plugin" - Enabled: - description: "True if the plugin is running. False if the plugin is not running, only installed." - type: "boolean" - x-nullable: false - example: true - Settings: - description: "Settings that can be modified by users." 
- type: "object" - x-nullable: false - required: [Args, Devices, Env, Mounts] - properties: - Mounts: - type: "array" - items: - $ref: "#/definitions/PluginMount" - Env: - type: "array" - items: - type: "string" - example: - - "DEBUG=0" - Args: - type: "array" - items: - type: "string" - Devices: - type: "array" - items: - $ref: "#/definitions/PluginDevice" - PluginReference: - description: "plugin remote reference used to push/pull the plugin" - type: "string" - x-nullable: false - example: "localhost:5000/tiborvass/sample-volume-plugin:latest" - Config: - description: "The config of a plugin." - type: "object" - x-nullable: false - required: - - Description - - Documentation - - Interface - - Entrypoint - - WorkDir - - Network - - Linux - - PidHost - - PropagatedMount - - IpcHost - - Mounts - - Env - - Args - properties: - DockerVersion: - description: "Docker Version used to create the plugin" - type: "string" - x-nullable: false - example: "17.06.0-ce" - Description: - type: "string" - x-nullable: false - example: "A sample volume plugin for Docker" - Documentation: - type: "string" - x-nullable: false - example: "https://docs.docker.com/engine/extend/plugins/" - Interface: - description: "The interface between Docker and the plugin" - x-nullable: false - type: "object" - required: [Types, Socket] - properties: - Types: - type: "array" - items: - $ref: "#/definitions/PluginInterfaceType" - example: - - "docker.volumedriver/1.0" - Socket: - type: "string" - x-nullable: false - example: "plugins.sock" - Entrypoint: - type: "array" - items: - type: "string" - example: - - "/usr/bin/sample-volume-plugin" - - "/data" - WorkDir: - type: "string" - x-nullable: false - example: "/bin/" - User: - type: "object" - x-nullable: false - properties: - UID: - type: "integer" - format: "uint32" - example: 1000 - GID: - type: "integer" - format: "uint32" - example: 1000 - Network: - type: "object" - x-nullable: false - required: [Type] - properties: - Type: - x-nullable: false - type: "string" - example: "host" - Linux: - type: "object" - x-nullable: false - required: [Capabilities, AllowAllDevices, Devices] - properties: - Capabilities: - type: "array" - items: - type: "string" - example: - - "CAP_SYS_ADMIN" - - "CAP_SYSLOG" - AllowAllDevices: - type: "boolean" - x-nullable: false - example: false - Devices: - type: "array" - items: - $ref: "#/definitions/PluginDevice" - PropagatedMount: - type: "string" - x-nullable: false - example: "/mnt/volumes" - IpcHost: - type: "boolean" - x-nullable: false - example: false - PidHost: - type: "boolean" - x-nullable: false - example: false - Mounts: - type: "array" - items: - $ref: "#/definitions/PluginMount" - Env: - type: "array" - items: - $ref: "#/definitions/PluginEnv" - example: - - Name: "DEBUG" - Description: "If set, prints debug messages" - Settable: null - Value: "0" - Args: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Value] - properties: - Name: - x-nullable: false - type: "string" - example: "args" - Description: - x-nullable: false - type: "string" - example: "command line arguments" - Settable: - type: "array" - items: - type: "string" - Value: - type: "array" - items: - type: "string" - rootfs: - type: "object" - properties: - type: - type: "string" - example: "layers" - diff_ids: - type: "array" - items: - type: "string" - example: - - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" - - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" - - ObjectVersion: - 
description: | - The version number of the object such as node, service, etc. This is needed to avoid conflicting writes. - The client must send the version number along with the modified specification when updating these objects. - This approach ensures safe concurrency and determinism in that the change on the object - may not be applied if the version number has changed from the last read. In other words, - if two update requests specify the same base version, only one of the requests can succeed. - As a result, two separate update requests that happen at the same time will not - unintentionally overwrite each other. - type: "object" - properties: - Index: - type: "integer" - format: "uint64" - example: 373531 - - NodeSpec: - type: "object" - properties: - Name: - description: "Name for the node." - type: "string" - example: "my-node" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - Role: - description: "Role of the node." - type: "string" - enum: - - "worker" - - "manager" - example: "manager" - Availability: - description: "Availability of the node." - type: "string" - enum: - - "active" - - "pause" - - "drain" - example: "active" - example: - Availability: "active" - Name: "node-name" - Role: "manager" - Labels: - foo: "bar" - - Node: - type: "object" - properties: - ID: - type: "string" - example: "24ifsmvkjbyhk" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - description: | - Date and time at which the node was added to the swarm in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2016-08-18T10:44:24.496525531Z" - UpdatedAt: - description: | - Date and time at which the node was last updated in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2017-08-09T07:09:37.632105588Z" - Spec: - $ref: "#/definitions/NodeSpec" - Description: - $ref: "#/definitions/NodeDescription" - Status: - $ref: "#/definitions/NodeStatus" - ManagerStatus: - $ref: "#/definitions/ManagerStatus" - - NodeDescription: - description: | - NodeDescription encapsulates the properties of the Node as reported by the - agent. - type: "object" - properties: - Hostname: - type: "string" - example: "bf3067039e47" - Platform: - $ref: "#/definitions/Platform" - Resources: - $ref: "#/definitions/ResourceObject" - Engine: - $ref: "#/definitions/EngineDescription" - TLSInfo: - $ref: "#/definitions/TLSInfo" - - Platform: - description: | - Platform represents the platform (Arch/OS). - type: "object" - properties: - Architecture: - description: | - Architecture represents the hardware architecture (for example, - `x86_64`). - type: "string" - example: "x86_64" - OS: - description: | - OS represents the Operating System (for example, `linux` or `windows`). - type: "string" - example: "linux" - - EngineDescription: - description: "EngineDescription provides information about an engine." 
- type: "object" - properties: - EngineVersion: - type: "string" - example: "17.06.0" - Labels: - type: "object" - additionalProperties: - type: "string" - example: - foo: "bar" - Plugins: - type: "array" - items: - type: "object" - properties: - Type: - type: "string" - Name: - type: "string" - example: - - Type: "Log" - Name: "awslogs" - - Type: "Log" - Name: "fluentd" - - Type: "Log" - Name: "gcplogs" - - Type: "Log" - Name: "gelf" - - Type: "Log" - Name: "journald" - - Type: "Log" - Name: "json-file" - - Type: "Log" - Name: "logentries" - - Type: "Log" - Name: "splunk" - - Type: "Log" - Name: "syslog" - - Type: "Network" - Name: "bridge" - - Type: "Network" - Name: "host" - - Type: "Network" - Name: "ipvlan" - - Type: "Network" - Name: "macvlan" - - Type: "Network" - Name: "null" - - Type: "Network" - Name: "overlay" - - Type: "Volume" - Name: "local" - - Type: "Volume" - Name: "localhost:5000/vieux/sshfs:latest" - - Type: "Volume" - Name: "vieux/sshfs:latest" - - TLSInfo: - description: "Information about the issuer of leaf TLS certificates and the trusted root CA certificate" - type: "object" - properties: - TrustRoot: - description: "The root CA certificate(s) that are used to validate leaf TLS certificates" - type: "string" - CertIssuerSubject: - description: "The base64-url-safe-encoded raw subject bytes of the issuer" - type: "string" - CertIssuerPublicKey: - description: "The base64-url-safe-encoded raw public key bytes of the issuer" - type: "string" - example: - TrustRoot: | - -----BEGIN CERTIFICATE----- - MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw - EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 - MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH - A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf - 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB - Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO - PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz - pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H - -----END CERTIFICATE----- - CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" - CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" - - NodeStatus: - description: | - NodeStatus represents the status of a node. - - It provides the current status of the node, as seen by the manager. - type: "object" - properties: - State: - $ref: "#/definitions/NodeState" - Message: - type: "string" - example: "" - Addr: - description: "IP address of the node." - type: "string" - example: "172.17.0.2" - - NodeState: - description: "NodeState represents the state of a node." - type: "string" - enum: - - "unknown" - - "down" - - "ready" - - "disconnected" - example: "ready" - - ManagerStatus: - description: | - ManagerStatus represents the status of a manager. - - It provides the current status of a node's manager component, if the node - is a manager. - x-nullable: true - type: "object" - properties: - Leader: - type: "boolean" - default: false - example: true - Reachability: - $ref: "#/definitions/Reachability" - Addr: - description: | - The IP address and port at which the manager is reachable. - type: "string" - example: "10.0.0.46:2377" - - Reachability: - description: "Reachability represents the reachability of a node." - type: "string" - enum: - - "unknown" - - "unreachable" - - "reachable" - example: "reachable" - - SwarmSpec: - description: "User modifiable swarm configuration." 
- type: "object" - properties: - Name: - description: "Name of the swarm." - type: "string" - example: "default" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.corp.type: "production" - com.example.corp.department: "engineering" - Orchestration: - description: "Orchestration configuration." - type: "object" - x-nullable: true - properties: - TaskHistoryRetentionLimit: - description: "The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks." - type: "integer" - format: "int64" - example: 10 - Raft: - description: "Raft configuration." - type: "object" - properties: - SnapshotInterval: - description: "The number of log entries between snapshots." - type: "integer" - format: "uint64" - example: 10000 - KeepOldSnapshots: - description: "The number of snapshots to keep beyond the current snapshot." - type: "integer" - format: "uint64" - LogEntriesForSlowFollowers: - description: "The number of log entries to keep around to sync up slow followers after a snapshot is created." - type: "integer" - format: "uint64" - example: 500 - ElectionTick: - description: | - The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. - - A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. - type: "integer" - example: 3 - HeartbeatTick: - description: | - The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. - - A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. - type: "integer" - example: 1 - Dispatcher: - description: "Dispatcher configuration." - type: "object" - x-nullable: true - properties: - HeartbeatPeriod: - description: "The delay for an agent to send a heartbeat to the dispatcher." - type: "integer" - format: "int64" - example: 5000000000 - CAConfig: - description: "CA configuration." - type: "object" - x-nullable: true - properties: - NodeCertExpiry: - description: "The duration node certificates are issued for." - type: "integer" - format: "int64" - example: 7776000000000000 - ExternalCAs: - description: "Configuration for forwarding signing requests to an external certificate authority." - type: "array" - items: - type: "object" - properties: - Protocol: - description: "Protocol for communication with the external CA (currently only `cfssl` is supported)." - type: "string" - enum: - - "cfssl" - default: "cfssl" - URL: - description: "URL where certificate signing requests should be sent." - type: "string" - Options: - description: "An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver." - type: "object" - additionalProperties: - type: "string" - CACert: - description: "The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be to the current swarm root CA certificate if not provided)." - type: "string" - SigningCACert: - description: "The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format." - type: "string" - SigningCAKey: - description: "The desired signing CA key for all swarm node TLS leaf certificates, in PEM format." 
- type: "string" - ForceRotate: - description: "An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey`" - format: "uint64" - type: "integer" - EncryptionConfig: - description: "Parameters related to encryption-at-rest." - type: "object" - properties: - AutoLockManagers: - description: "If set, generate a key and use it to lock data stored on the managers." - type: "boolean" - example: false - TaskDefaults: - description: "Defaults for creating tasks in this cluster." - type: "object" - properties: - LogDriver: - description: | - The log driver to use for tasks created in the orchestrator if - unspecified by a service. - - Updating this value only affects new tasks. Existing tasks continue - to use their previously configured log driver until recreated. - type: "object" - properties: - Name: - description: | - The log driver to use as a default for new tasks. - type: "string" - example: "json-file" - Options: - description: | - Driver-specific options for the selectd log driver, specified - as key/value pairs. - type: "object" - additionalProperties: - type: "string" - example: - "max-file": "10" - "max-size": "100m" - - # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but - # without `JoinTokens`. - ClusterInfo: - description: | - ClusterInfo represents information about the swarm as is returned by the - "/info" endpoint. Join-tokens are not included. - x-nullable: true - type: "object" - properties: - ID: - description: "The ID of the swarm." - type: "string" - example: "abajmipo7b4xz5ip2nrla6b11" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - description: | - Date and time at which the swarm was initialised in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2016-08-18T10:44:24.496525531Z" - UpdatedAt: - description: | - Date and time at which the swarm was last updated in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2017-08-09T07:09:37.632105588Z" - Spec: - $ref: "#/definitions/SwarmSpec" - TLSInfo: - $ref: "#/definitions/TLSInfo" - RootRotationInProgress: - description: "Whether there is currently a root CA rotation in progress for the swarm" - type: "boolean" - example: false - - JoinTokens: - description: | - JoinTokens contains the tokens workers and managers need to join the swarm. - type: "object" - properties: - Worker: - description: | - The token workers can use to join the swarm. - type: "string" - example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" - Manager: - description: | - The token managers can use to join the swarm. - type: "string" - example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" - - Swarm: - type: "object" - allOf: - - $ref: "#/definitions/ClusterInfo" - - type: "object" - properties: - JoinTokens: - $ref: "#/definitions/JoinTokens" - - TaskSpec: - description: "User modifiable task configuration." - type: "object" - properties: - PluginSpec: - type: "object" - description: "Invalid when specified with `ContainerSpec`. *(Experimental release only.)*" - properties: - Name: - description: "The name or 'alias' to use for the plugin." - type: "string" - Remote: - description: "The plugin image reference to use." 
- type: "string" - Disabled: - description: "Disable the plugin once scheduled." - type: "boolean" - PluginPrivilege: - type: "array" - items: - description: "Describes a permission accepted by the user upon installing the plugin." - type: "object" - properties: - Name: - type: "string" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" - ContainerSpec: - type: "object" - description: "Invalid when specified with `PluginSpec`." - properties: - Image: - description: "The image name to use for the container" - type: "string" - Labels: - description: "User-defined key/value data." - type: "object" - additionalProperties: - type: "string" - Command: - description: "The command to be run in the image." - type: "array" - items: - type: "string" - Args: - description: "Arguments to the command." - type: "array" - items: - type: "string" - Hostname: - description: "The hostname to use for the container, as a valid RFC 1123 hostname." - type: "string" - Env: - description: "A list of environment variables in the form `VAR=value`." - type: "array" - items: - type: "string" - Dir: - description: "The working directory for commands to run in." - type: "string" - User: - description: "The user inside the container." - type: "string" - Groups: - type: "array" - description: "A list of additional groups that the container process will run as." - items: - type: "string" - Privileges: - type: "object" - description: "Security options for the container" - properties: - CredentialSpec: - type: "object" - description: "CredentialSpec for managed service account (Windows only)" - properties: - File: - type: "string" - description: | - Load credential spec from this file. The file is read by the daemon, and must be present in the - `CredentialSpecs` subdirectory in the docker data directory, which defaults to - `C:\ProgramData\Docker\` on Windows. - - For example, specifying `spec.json` loads `C:\ProgramData\Docker\CredentialSpecs\spec.json`. - -


-
-                      > **Note**: `CredentialSpec.File` and `CredentialSpec.Registry` are mutually exclusive.
-                  Registry:
-                    type: "string"
-                    description: |
-                      Load credential spec from this value in the Windows registry. The specified registry value must be
-                      located in:
-
-                      `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs`
-


- - - > **Note**: `CredentialSpec.File` and `CredentialSpec.Registry` are mutually exclusive. - SELinuxContext: - type: "object" - description: "SELinux labels of the container" - properties: - Disable: - type: "boolean" - description: "Disable SELinux" - User: - type: "string" - description: "SELinux user label" - Role: - type: "string" - description: "SELinux role label" - Type: - type: "string" - description: "SELinux type label" - Level: - type: "string" - description: "SELinux level label" - TTY: - description: "Whether a pseudo-TTY should be allocated." - type: "boolean" - OpenStdin: - description: "Open `stdin`" - type: "boolean" - ReadOnly: - description: "Mount the container's root filesystem as read only." - type: "boolean" - Mounts: - description: "Specification for mounts to be added to containers created as part of the service." - type: "array" - items: - $ref: "#/definitions/Mount" - StopSignal: - description: "Signal to stop the container." - type: "string" - StopGracePeriod: - description: "Amount of time to wait for the container to terminate before forcefully killing it." - type: "integer" - format: "int64" - HealthCheck: - $ref: "#/definitions/HealthConfig" - Hosts: - type: "array" - description: | - A list of hostname/IP mappings to add to the container's `hosts` - file. The format of extra hosts is specified in the - [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) - man page: - - IP_address canonical_hostname [aliases...] - items: - type: "string" - DNSConfig: - description: "Specification for DNS related configurations in resolver configuration file (`resolv.conf`)." - type: "object" - properties: - Nameservers: - description: "The IP addresses of the name servers." - type: "array" - items: - type: "string" - Search: - description: "A search list for host-name lookup." - type: "array" - items: - type: "string" - Options: - description: "A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.)." - type: "array" - items: - type: "string" - Secrets: - description: "Secrets contains references to zero or more secrets that will be exposed to the service." - type: "array" - items: - type: "object" - properties: - File: - description: "File represents a specific target that is backed by a file." - type: "object" - properties: - Name: - description: "Name represents the final filename in the filesystem." - type: "string" - UID: - description: "UID represents the file UID." - type: "string" - GID: - description: "GID represents the file GID." - type: "string" - Mode: - description: "Mode represents the FileMode of the file." - type: "integer" - format: "uint32" - SecretID: - description: "SecretID represents the ID of the specific secret that we're referencing." - type: "string" - SecretName: - description: | - SecretName is the name of the secret that this references, but this is just provided for - lookup/display purposes. The secret in the reference will be identified by its ID. - type: "string" - Configs: - description: "Configs contains references to zero or more configs that will be exposed to the service." - type: "array" - items: - type: "object" - properties: - File: - description: "File represents a specific target that is backed by a file." - type: "object" - properties: - Name: - description: "Name represents the final filename in the filesystem." - type: "string" - UID: - description: "UID represents the file UID." - type: "string" - GID: - description: "GID represents the file GID." 
- type: "string" - Mode: - description: "Mode represents the FileMode of the file." - type: "integer" - format: "uint32" - ConfigID: - description: "ConfigID represents the ID of the specific config that we're referencing." - type: "string" - ConfigName: - description: | - ConfigName is the name of the config that this references, but this is just provided for - lookup/display purposes. The config in the reference will be identified by its ID. - type: "string" - Isolation: - type: "string" - description: "Isolation technology of the containers running the service. (Windows only)" - enum: - - "default" - - "process" - - "hyperv" - Resources: - description: "Resource requirements which apply to each individual container created as part of the service." - type: "object" - properties: - Limits: - description: "Define resources limits." - $ref: "#/definitions/ResourceObject" - Reservation: - description: "Define resources reservation." - $ref: "#/definitions/ResourceObject" - RestartPolicy: - description: "Specification for the restart policy which applies to containers created as part of this service." - type: "object" - properties: - Condition: - description: "Condition for restart." - type: "string" - enum: - - "none" - - "on-failure" - - "any" - Delay: - description: "Delay between restart attempts." - type: "integer" - format: "int64" - MaxAttempts: - description: "Maximum attempts to restart a given container before giving up (default value is 0, which is ignored)." - type: "integer" - format: "int64" - default: 0 - Window: - description: "Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded)." - type: "integer" - format: "int64" - default: 0 - Placement: - type: "object" - properties: - Constraints: - description: "An array of constraints." - type: "array" - items: - type: "string" - example: - - "node.hostname!=node3.corp.example.com" - - "node.role!=manager" - - "node.labels.type==production" - Preferences: - description: "Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence." - type: "array" - items: - type: "object" - properties: - Spread: - type: "object" - properties: - SpreadDescriptor: - description: "label descriptor, such as engine.labels.az" - type: "string" - example: - - Spread: - SpreadDescriptor: "node.labels.datacenter" - - Spread: - SpreadDescriptor: "node.labels.rack" - Platforms: - description: | - Platforms stores all the platforms that the service's image can - run on. This field is used in the platform filter for scheduling. - If empty, then the platform filter is off, meaning there are no - scheduling restrictions. - type: "array" - items: - $ref: "#/definitions/Platform" - ForceUpdate: - description: "A counter that triggers an update even if no relevant parameters have been changed." - type: "integer" - Runtime: - description: "Runtime is the type of runtime specified for the task executor." - type: "string" - Networks: - type: "array" - items: - type: "object" - properties: - Target: - type: "string" - Aliases: - type: "array" - items: - type: "string" - LogDriver: - description: "Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified." 
- type: "object" - properties: - Name: - type: "string" - Options: - type: "object" - additionalProperties: - type: "string" - - TaskState: - type: "string" - enum: - - "new" - - "allocated" - - "pending" - - "assigned" - - "accepted" - - "preparing" - - "ready" - - "starting" - - "running" - - "complete" - - "shutdown" - - "failed" - - "rejected" - - "remove" - - "orphaned" - - Task: - type: "object" - properties: - ID: - description: "The ID of the task." - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Name: - description: "Name of the task." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - Spec: - $ref: "#/definitions/TaskSpec" - ServiceID: - description: "The ID of the service this task is part of." - type: "string" - Slot: - type: "integer" - NodeID: - description: "The ID of the node that this task is on." - type: "string" - AssignedGenericResources: - $ref: "#/definitions/GenericResources" - Status: - type: "object" - properties: - Timestamp: - type: "string" - format: "dateTime" - State: - $ref: "#/definitions/TaskState" - Message: - type: "string" - Err: - type: "string" - ContainerStatus: - type: "object" - properties: - ContainerID: - type: "string" - PID: - type: "integer" - ExitCode: - type: "integer" - DesiredState: - $ref: "#/definitions/TaskState" - example: - ID: "0kzzo1i0y4jz6027t0k7aezc7" - Version: - Index: 71 - CreatedAt: "2016-06-07T21:07:31.171892745Z" - UpdatedAt: "2016-06-07T21:07:31.376370513Z" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: "60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:31.290032978Z" - State: "running" - Message: "started" - ContainerStatus: - ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" - PID: 677 - DesiredState: "running" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.10/16" - AssignedGenericResources: - - DiscreteResourceSpec: - Kind: "SSD" - Value: 3 - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID1" - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID2" - - ServiceSpec: - description: "User modifiable configuration for a service." - properties: - Name: - description: "Name of the service." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - TaskTemplate: - $ref: "#/definitions/TaskSpec" - Mode: - description: "Scheduling mode for the service." - type: "object" - properties: - Replicated: - type: "object" - properties: - Replicas: - type: "integer" - format: "int64" - Global: - type: "object" - UpdateConfig: - description: "Specification for the update strategy of the service." 
- type: "object" - properties: - Parallelism: - description: "Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism)." - type: "integer" - format: "int64" - Delay: - description: "Amount of time between updates, in nanoseconds." - type: "integer" - format: "int64" - FailureAction: - description: "Action to take if an updated task fails to run, or stops running during the update." - type: "string" - enum: - - "continue" - - "pause" - - "rollback" - Monitor: - description: "Amount of time to monitor each updated task for failures, in nanoseconds." - type: "integer" - format: "int64" - MaxFailureRatio: - description: "The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1." - type: "number" - default: 0 - Order: - description: "The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down." - type: "string" - enum: - - "stop-first" - - "start-first" - RollbackConfig: - description: "Specification for the rollback strategy of the service." - type: "object" - properties: - Parallelism: - description: "Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism)." - type: "integer" - format: "int64" - Delay: - description: "Amount of time between rollback iterations, in nanoseconds." - type: "integer" - format: "int64" - FailureAction: - description: "Action to take if an rolled back task fails to run, or stops running during the rollback." - type: "string" - enum: - - "continue" - - "pause" - Monitor: - description: "Amount of time to monitor each rolled back task for failures, in nanoseconds." - type: "integer" - format: "int64" - MaxFailureRatio: - description: "The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1." - type: "number" - default: 0 - Order: - description: "The order of operations when rolling back a task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down." - type: "string" - enum: - - "stop-first" - - "start-first" - Networks: - description: "Array of network names or IDs to attach the service to." - type: "array" - items: - type: "object" - properties: - Target: - type: "string" - Aliases: - type: "array" - items: - type: "string" - EndpointSpec: - $ref: "#/definitions/EndpointSpec" - - EndpointPortConfig: - type: "object" - properties: - Name: - type: "string" - Protocol: - type: "string" - enum: - - "tcp" - - "udp" - - "sctp" - TargetPort: - description: "The port inside the container." - type: "integer" - PublishedPort: - description: "The port on the swarm hosts." - type: "integer" - PublishMode: - description: | - The mode in which port is published. - -


- - - "ingress" makes the target port accessible on on every node, - regardless of whether there is a task for the service running on - that node or not. - - "host" bypasses the routing mesh and publish the port directly on - the swarm node where that service is running. - - type: "string" - enum: - - "ingress" - - "host" - default: "ingress" - example: "ingress" - - EndpointSpec: - description: "Properties that can be configured to access and load balance a service." - type: "object" - properties: - Mode: - description: "The mode of resolution to use for internal load balancing - between tasks." - type: "string" - enum: - - "vip" - - "dnsrr" - default: "vip" - Ports: - description: "List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used." - type: "array" - items: - $ref: "#/definitions/EndpointPortConfig" - - Service: - type: "object" - properties: - ID: - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Spec: - $ref: "#/definitions/ServiceSpec" - Endpoint: - type: "object" - properties: - Spec: - $ref: "#/definitions/EndpointSpec" - Ports: - type: "array" - items: - $ref: "#/definitions/EndpointPortConfig" - VirtualIPs: - type: "array" - items: - type: "object" - properties: - NetworkID: - type: "string" - Addr: - type: "string" - UpdateStatus: - description: "The status of a service update." - type: "object" - properties: - State: - type: "string" - enum: - - "updating" - - "paused" - - "completed" - StartedAt: - type: "string" - format: "dateTime" - CompletedAt: - type: "string" - format: "dateTime" - Message: - type: "string" - example: - ID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Version: - Index: 19 - CreatedAt: "2016-06-07T21:05:51.880065305Z" - UpdatedAt: "2016-06-07T21:07:29.962229872Z" - Spec: - Name: "hopeful_cori" - TaskTemplate: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ForceUpdate: 0 - Mode: - Replicated: - Replicas: 1 - UpdateConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Mode: "vip" - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - Endpoint: - Spec: - Mode: "vip" - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - VirtualIPs: - - - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" - Addr: "10.255.0.2/16" - - - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" - Addr: "10.255.0.3/16" - - ImageDeleteResponseItem: - type: "object" - properties: - Untagged: - description: "The image ID of an image that was untagged" - type: "string" - Deleted: - description: "The image ID of an image that was deleted" - type: "string" - - ServiceUpdateResponse: - type: "object" - properties: - Warnings: - description: "Optional warning messages" - type: "array" - items: - type: "string" - example: - Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" - - ContainerSummary: - type: "array" - items: - type: "object" - properties: - Id: - description: "The ID of this container" - type: "string" - x-go-name: "ID" - Names: - description: "The 
names that this container has been given" - type: "array" - items: - type: "string" - Image: - description: "The name of the image used when creating this container" - type: "string" - ImageID: - description: "The ID of the image that this container was created from" - type: "string" - Command: - description: "Command to run when starting the container" - type: "string" - Created: - description: "When the container was created" - type: "integer" - format: "int64" - Ports: - description: "The ports exposed by this container" - type: "array" - items: - $ref: "#/definitions/Port" - SizeRw: - description: "The size of files that have been created or changed by this container" - type: "integer" - format: "int64" - SizeRootFs: - description: "The total size of all the files in this container" - type: "integer" - format: "int64" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - State: - description: "The state of this container (e.g. `Exited`)" - type: "string" - Status: - description: "Additional human-readable status of this container (e.g. `Exit 0`)" - type: "string" - HostConfig: - type: "object" - properties: - NetworkMode: - type: "string" - NetworkSettings: - description: "A summary of the container's network settings" - type: "object" - properties: - Networks: - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - Mounts: - type: "array" - items: - $ref: "#/definitions/Mount" - - Driver: - description: "Driver represents a driver (network, logging, secrets)." - type: "object" - required: [Name] - properties: - Name: - description: "Name of the driver." - type: "string" - x-nullable: false - example: "some-driver" - Options: - description: "Key/value map of driver-specific options." - type: "object" - x-nullable: false - additionalProperties: - type: "string" - example: - OptionA: "value for driver-specific option A" - OptionB: "value for driver-specific option B" - - SecretSpec: - type: "object" - properties: - Name: - description: "User-defined name of the secret." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Data: - description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) - data to store as secret. - - This field is only used to _create_ a secret, and is not returned by - other endpoints. - type: "string" - example: "" - Driver: - description: "Name of the secrets driver used to fetch the secret's value from an external secret store" - $ref: "#/definitions/Driver" - Templating: - description: | - Templating driver, if applicable - - Templating controls whether and how to evaluate the config payload as - a template. If no driver is set, no templating is used. - $ref: "#/definitions/Driver" - - Secret: - type: "object" - properties: - ID: - type: "string" - example: "blt1owaxmitz71s9v5zh81zun" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - example: "2017-07-20T13:55:28.678958722Z" - UpdatedAt: - type: "string" - format: "dateTime" - example: "2017-07-20T13:55:28.678958722Z" - Spec: - $ref: "#/definitions/SecretSpec" - - ConfigSpec: - type: "object" - properties: - Name: - description: "User-defined name of the config." - type: "string" - Labels: - description: "User-defined key/value metadata." 
- type: "object" - additionalProperties: - type: "string" - Data: - description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) - config data. - type: "string" - Templating: - description: | - Templating driver, if applicable - - Templating controls whether and how to evaluate the config payload as - a template. If no driver is set, no templating is used. - $ref: "#/definitions/Driver" - - Config: - type: "object" - properties: - ID: - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Spec: - $ref: "#/definitions/ConfigSpec" - - SystemInfo: - type: "object" - properties: - ID: - description: | - Unique identifier of the daemon. - -


-
-          > **Note**: The format of the ID itself is not part of the API, and
-          > should not be considered stable.
-        type: "string"
-        example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS"
-      Containers:
-        description: "Total number of containers on the host."
-        type: "integer"
-        example: 14
-      ContainersRunning:
-        description: |
-          Number of containers with status `"running"`.
-        type: "integer"
-        example: 3
-      ContainersPaused:
-        description: |
-          Number of containers with status `"paused"`.
-        type: "integer"
-        example: 1
-      ContainersStopped:
-        description: |
-          Number of containers with status `"stopped"`.
-        type: "integer"
-        example: 10
-      Images:
-        description: |
-          Total number of images on the host.
-
-          Both _tagged_ and _untagged_ (dangling) images are counted.
-        type: "integer"
-        example: 508
-      Driver:
-        description: "Name of the storage driver in use."
-        type: "string"
-        example: "overlay2"
-      DriverStatus:
-        description: |
-          Information specific to the storage driver, provided as
-          "label" / "value" pairs.
-
-          This information is provided by the storage driver, and formatted
-          in a way consistent with the output of `docker info` on the command
-          line.
-


-
-          > **Note**: The information returned in this field, including the
-          > formatting of values and labels, should not be considered stable,
-          > and may change without notice.
-        type: "array"
-        items:
-          type: "array"
-          items:
-            type: "string"
-        example:
-          - ["Backing Filesystem", "extfs"]
-          - ["Supports d_type", "true"]
-          - ["Native Overlay Diff", "true"]
-      DockerRootDir:
-        description: |
-          Root directory of persistent Docker state.
-
-          Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker`
-          on Windows.
-        type: "string"
-        example: "/var/lib/docker"
-      SystemStatus:
-        description: |
-          Status information about this node (standalone Swarm API).
-


- - > **Note**: The information returned in this field is only propagated - > by the Swarm standalone API, and is empty (`null`) when using - > built-in swarm mode. - type: "array" - items: - type: "array" - items: - type: "string" - example: - - ["Role", "primary"] - - ["State", "Healthy"] - - ["Strategy", "spread"] - - ["Filters", "health, port, containerslots, dependency, affinity, constraint, whitelist"] - - ["Nodes", "2"] - - [" swarm-agent-00", "192.168.99.102:2376"] - - [" └ ID", "5CT6:FBGO:RVGO:CZL4:PB2K:WCYN:2JSV:KSHH:GGFW:QOPG:6J5Q:IOZ2|192.168.99.102:2376"] - - [" └ Status", "Healthy"] - - [" └ Containers", "1 (1 Running, 0 Paused, 0 Stopped)"] - - [" └ Reserved CPUs", "0 / 1"] - - [" └ Reserved Memory", "0 B / 1.021 GiB"] - - [" └ Labels", "kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"] - - [" └ UpdatedAt", "2017-08-09T10:03:46Z"] - - [" └ ServerVersion", "17.06.0-ce"] - - [" swarm-manager", "192.168.99.101:2376"] - - [" └ ID", "TAMD:7LL3:SEF7:LW2W:4Q2X:WVFH:RTXX:JSYS:XY2P:JEHL:ZMJK:JGIW|192.168.99.101:2376"] - - [" └ Status", "Healthy"] - - [" └ Containers", "2 (2 Running, 0 Paused, 0 Stopped)"] - - [" └ Reserved CPUs", "0 / 1"] - - [" └ Reserved Memory", "0 B / 1.021 GiB"] - - [" └ Labels", "kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"] - - [" └ UpdatedAt", "2017-08-09T10:04:11Z"] - - [" └ ServerVersion", "17.06.0-ce"] - Plugins: - $ref: "#/definitions/PluginsInfo" - MemoryLimit: - description: "Indicates if the host has memory limit support enabled." - type: "boolean" - example: true - SwapLimit: - description: "Indicates if the host has memory swap limit support enabled." - type: "boolean" - example: true - KernelMemory: - description: "Indicates if the host has kernel memory limit support enabled." - type: "boolean" - example: true - CpuCfsPeriod: - description: "Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host." - type: "boolean" - example: true - CpuCfsQuota: - description: "Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by the host." - type: "boolean" - example: true - CPUShares: - description: "Indicates if CPU Shares limiting is supported by the host." - type: "boolean" - example: true - CPUSet: - description: | - Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. - - See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) - type: "boolean" - example: true - OomKillDisable: - description: "Indicates if OOM killer disable is supported on the host." - type: "boolean" - IPv4Forwarding: - description: "Indicates IPv4 forwarding is enabled." - type: "boolean" - example: true - BridgeNfIptables: - description: "Indicates if `bridge-nf-call-iptables` is available on the host." - type: "boolean" - example: true - BridgeNfIp6tables: - description: "Indicates if `bridge-nf-call-ip6tables` is available on the host." - type: "boolean" - example: true - Debug: - description: "Indicates if the daemon is running in debug-mode / with debug-level logging enabled." - type: "boolean" - example: true - NFd: - description: | - The total number of file Descriptors in use by the daemon process. - - This information is only returned if debug-mode is enabled. 
- type: "integer" - example: 64 - NGoroutines: - description: | - The number of goroutines that currently exist. - - This information is only returned if debug-mode is enabled. - type: "integer" - example: 174 - SystemTime: - description: | - Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) - format with nano-seconds. - type: "string" - example: "2017-08-08T20:28:29.06202363Z" - LoggingDriver: - description: | - The logging driver to use as a default for new containers. - type: "string" - CgroupDriver: - description: | - The driver to use for managing cgroups. - type: "string" - enum: ["cgroupfs", "systemd"] - default: "cgroupfs" - example: "cgroupfs" - NEventsListener: - description: "Number of event listeners subscribed." - type: "integer" - example: 30 - KernelVersion: - description: | - Kernel version of the host. - - On Linux, this information obtained from `uname`. On Windows this - information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ - registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. - type: "string" - example: "4.9.38-moby" - OperatingSystem: - description: | - Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" - or "Windows Server 2016 Datacenter" - type: "string" - example: "Alpine Linux v3.5" - OSType: - description: | - Generic type of the operating system of the host, as returned by the - Go runtime (`GOOS`). - - Currently returned values are "linux" and "windows". A full list of - possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). - type: "string" - example: "linux" - Architecture: - description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). - - A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). - type: "string" - example: "x86_64" - NCPU: - description: | - The number of logical CPUs usable by the daemon. - - The number of available CPUs is checked by querying the operating - system when the daemon starts. Changes to operating system CPU - allocation after the daemon is started are not reflected. - type: "integer" - example: 4 - MemTotal: - description: | - Total amount of physical memory available on the host, in kilobytes (kB). - type: "integer" - format: "int64" - example: 2095882240 - - IndexServerAddress: - description: | - Address / URL of the index server that is used for image search, - and as a default for user authentication for Docker Hub and Docker Cloud. - default: "https://index.docker.io/v1/" - type: "string" - example: "https://index.docker.io/v1/" - RegistryConfig: - $ref: "#/definitions/RegistryServiceConfig" - GenericResources: - $ref: "#/definitions/GenericResources" - HttpProxy: - description: | - HTTP-proxy configured for the daemon. This value is obtained from the - [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. - - Containers do not automatically inherit this configuration. - type: "string" - example: "http://user:pass@proxy.corp.example.com:8080" - HttpsProxy: - description: | - HTTPS-proxy configured for the daemon. This value is obtained from the - [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. - - Containers do not automatically inherit this configuration. 
- type: "string" - example: "https://user:pass@proxy.corp.example.com:4443" - NoProxy: - description: | - Comma-separated list of domain extensions for which no proxy should be - used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) - environment variable. - - Containers do not automatically inherit this configuration. - type: "string" - example: "*.local, 169.254/16" - Name: - description: "Hostname of the host." - type: "string" - example: "node5.corp.example.com" - Labels: - description: | - User-defined labels (key/value metadata) as set on the daemon. - -


- - > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, - > set through the daemon configuration, and _node_ labels, set from a - > manager node in the Swarm. Node labels are not included in this - > field. Node labels can be retrieved using the `/nodes/(id)` endpoint - > on a manager node in the Swarm. - type: "array" - items: - type: "string" - example: ["storage=ssd", "production"] - ExperimentalBuild: - description: | - Indicates if experimental features are enabled on the daemon. - type: "boolean" - example: true - ServerVersion: - description: | - Version string of the daemon. - - > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) - > returns the Swarm version instead of the daemon version, for example - > `swarm/1.2.8`. - type: "string" - example: "17.06.0-ce" - ClusterStore: - description: | - URL of the distributed storage backend. - - - The storage backend is used for multihost networking (to store - network and endpoint information) and by the node discovery mechanism. - -


- - > **Note**: This field is only propagated when using standalone Swarm - > mode, and overlay networking using an external k/v store. Overlay - > networks with Swarm mode enabled use the built-in raft store, and - > this field will be empty. - type: "string" - example: "consul://consul.corp.example.com:8600/some/path" - ClusterAdvertise: - description: | - The network endpoint that the Engine advertises for the purpose of - node discovery. ClusterAdvertise is a `host:port` combination on which - the daemon is reachable by other hosts. - -


- - > **Note**: This field is only propagated when using standalone Swarm - > mode, and overlay networking using an external k/v store. Overlay - > networks with Swarm mode enabled use the built-in raft store, and - > this field will be empty. - type: "string" - example: "node5.corp.example.com:8000" - Runtimes: - description: | - List of [OCI compliant](https://github.com/opencontainers/runtime-spec) - runtimes configured on the daemon. Keys hold the "name" used to - reference the runtime. - - The Docker daemon relies on an OCI compliant runtime (invoked via the - `containerd` daemon) as its interface to the Linux kernel namespaces, - cgroups, and SELinux. - - The default runtime is `runc`, and automatically configured. Additional - runtimes can be configured by the user and will be listed here. - type: "object" - additionalProperties: - $ref: "#/definitions/Runtime" - default: - runc: - path: "docker-runc" - example: - runc: - path: "docker-runc" - runc-master: - path: "/go/bin/runc" - custom: - path: "/usr/local/bin/my-oci-runtime" - runtimeArgs: ["--debug", "--systemd-cgroup=false"] - DefaultRuntime: - description: | - Name of the default OCI runtime that is used when starting containers. - - The default can be overridden per-container at create time. - type: "string" - default: "runc" - example: "runc" - Swarm: - $ref: "#/definitions/SwarmInfo" - LiveRestoreEnabled: - description: | - Indicates if live restore is enabled. - - If enabled, containers are kept running when the daemon is shutdown - or upon daemon start if running containers are detected. - type: "boolean" - default: false - example: false - Isolation: - description: | - Represents the isolation technology to use as a default for containers. - The supported values are platform-specific. - - If no isolation value is specified on daemon start, on Windows client, - the default is `hyperv`, and on Windows server, the default is `process`. - - This option is currently not used on other platforms. - default: "default" - type: "string" - enum: - - "default" - - "hyperv" - - "process" - InitBinary: - description: | - Name and, optional, path of the the `docker-init` binary. - - If the path is omitted, the daemon searches the host's `$PATH` for the - binary and uses the first result. - type: "string" - example: "docker-init" - ContainerdCommit: - $ref: "#/definitions/Commit" - RuncCommit: - $ref: "#/definitions/Commit" - InitCommit: - $ref: "#/definitions/Commit" - SecurityOptions: - description: | - List of security features that are enabled on the daemon, such as - apparmor, seccomp, SELinux, and user-namespaces (userns). - - Additional configuration options for each security feature may - be present, and are included as a comma-separated list of key/value - pairs. - type: "array" - items: - type: "string" - example: - - "name=apparmor" - - "name=seccomp,profile=default" - - "name=selinux" - - "name=userns" - - - # PluginsInfo is a temp struct holding Plugins name - # registered with docker daemon. It is used by Info struct - PluginsInfo: - description: | - Available plugins per type. - -


- - > **Note**: Only unmanaged (V1) plugins are included in this list. - > V1 plugins are "lazily" loaded, and are not returned in this list - > if there is no resource using the plugin. - type: "object" - properties: - Volume: - description: "Names of available volume-drivers, and network-driver plugins." - type: "array" - items: - type: "string" - example: ["local"] - Network: - description: "Names of available network-drivers, and network-driver plugins." - type: "array" - items: - type: "string" - example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] - Authorization: - description: "Names of available authorization plugins." - type: "array" - items: - type: "string" - example: ["img-authz-plugin", "hbm"] - Log: - description: "Names of available logging-drivers, and logging-driver plugins." - type: "array" - items: - type: "string" - example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] - - - RegistryServiceConfig: - description: | - RegistryServiceConfig stores daemon registry services configuration. - type: "object" - x-nullable: true - properties: - AllowNondistributableArtifactsCIDRs: - description: | - List of IP ranges to which nondistributable artifacts can be pushed, - using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. - - type: "array" - items: - type: "string" - example: ["::1/128", "127.0.0.0/8"] - AllowNondistributableArtifactsHostnames: - description: | - List of registry hostnames to which nondistributable artifacts can be - pushed, using the format `[:]` or `[:]`. - - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior for the specified - registries. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. 
- type: "array" - items: - type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] - InsecureRegistryCIDRs: - description: | - List of IP ranges of insecure registries, using the CIDR syntax - ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries - accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates - from unknown CAs) communication. - - By default, local registries (`127.0.0.0/8`) are configured as - insecure. All other registries are secure. Communicating with an - insecure registry is not possible if the daemon assumes that registry - is secure. - - This configuration override this behavior, insecure communication with - registries whose resolved IP address is within the subnet described by - the CIDR syntax. - - Registries can also be marked insecure by hostname. Those registries - are listed under `IndexConfigs` and have their `Secure` field set to - `false`. - - > **Warning**: Using this option can be useful when running a local - > registry, but introduces security vulnerabilities. This option - > should therefore ONLY be used for testing purposes. For increased - > security, users should add their CA to their system's list of trusted - > CAs instead of enabling this option. - type: "array" - items: - type: "string" - example: ["::1/128", "127.0.0.0/8"] - IndexConfigs: - type: "object" - additionalProperties: - $ref: "#/definitions/IndexInfo" - example: - "127.0.0.1:5000": - "Name": "127.0.0.1:5000" - "Mirrors": [] - "Secure": false - "Official": false - "[2001:db8:a0b:12f0::1]:80": - "Name": "[2001:db8:a0b:12f0::1]:80" - "Mirrors": [] - "Secure": false - "Official": false - "docker.io": - Name: "docker.io" - Mirrors: ["https://hub-mirror.corp.example.com:5000/"] - Secure: true - Official: true - "registry.internal.corp.example.com:3000": - Name: "registry.internal.corp.example.com:3000" - Mirrors: [] - Secure: false - Official: false - Mirrors: - description: | - List of registry URLs that act as a mirror for the official - (`docker.io`) registry. - - type: "array" - items: - type: "string" - example: - - "https://hub-mirror.corp.example.com:5000/" - - "https://[2001:db8:a0b:12f0::1]/" - - IndexInfo: - description: - IndexInfo contains information about a registry. - type: "object" - x-nullable: true - properties: - Name: - description: | - Name of the registry, such as "docker.io". - type: "string" - example: "docker.io" - Mirrors: - description: | - List of mirrors, expressed as URIs. - type: "array" - items: - type: "string" - example: - - "https://hub-mirror.corp.example.com:5000/" - - "https://registry-2.docker.io/" - - "https://registry-3.docker.io/" - Secure: - description: | - Indicates if the the registry is part of the list of insecure - registries. - - If `false`, the registry is insecure. Insecure registries accept - un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from - unknown CAs) communication. - - > **Warning**: Insecure registries can be useful when running a local - > registry. However, because its use creates security vulnerabilities - > it should ONLY be enabled for testing purposes. For increased - > security, users should add their CA to their system's list of - > trusted CAs instead of enabling this option. 
- type: "boolean" - example: true - Official: - description: | - Indicates whether this is an official registry (i.e., Docker Hub / docker.io) - type: "boolean" - example: true - - Runtime: - description: | - Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) - runtime. - - The runtime is invoked by the daemon via the `containerd` daemon. OCI - runtimes act as an interface to the Linux kernel namespaces, cgroups, - and SELinux. - type: "object" - properties: - path: - description: | - Name and, optional, path, of the OCI executable binary. - - If the path is omitted, the daemon searches the host's `$PATH` for the - binary and uses the first result. - type: "string" - example: "/usr/local/bin/my-oci-runtime" - runtimeArgs: - description: | - List of command-line arguments to pass to the runtime when invoked. - type: "array" - x-nullable: true - items: - type: "string" - example: ["--debug", "--systemd-cgroup=false"] - - Commit: - description: | - Commit holds the Git-commit (SHA1) that a binary was built from, as - reported in the version-string of external tools, such as `containerd`, - or `runC`. - type: "object" - properties: - ID: - description: "Actual commit ID of external tool." - type: "string" - example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" - Expected: - description: | - Commit ID of external tool expected by dockerd as set at build time. - type: "string" - example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" - - SwarmInfo: - description: | - Represents generic information about swarm. - type: "object" - properties: - NodeID: - description: "Unique identifier of for this node in the swarm." - type: "string" - default: "" - example: "k67qz4598weg5unwwffg6z1m1" - NodeAddr: - description: | - IP address at which this node can be reached by other nodes in the - swarm. - type: "string" - default: "" - example: "10.0.0.46" - LocalNodeState: - $ref: "#/definitions/LocalNodeState" - ControlAvailable: - type: "boolean" - default: false - example: true - Error: - type: "string" - default: "" - RemoteManagers: - description: | - List of ID's and addresses of other managers in the swarm. - type: "array" - default: null - x-nullable: true - items: - $ref: "#/definitions/PeerNode" - example: - - NodeID: "71izy0goik036k48jg985xnds" - Addr: "10.0.0.158:2377" - - NodeID: "79y6h1o4gv8n120drcprv5nmc" - Addr: "10.0.0.159:2377" - - NodeID: "k67qz4598weg5unwwffg6z1m1" - Addr: "10.0.0.46:2377" - Nodes: - description: "Total number of nodes in the swarm." - type: "integer" - x-nullable: true - example: 4 - Managers: - description: "Total number of managers in the swarm." - type: "integer" - x-nullable: true - example: 3 - Cluster: - $ref: "#/definitions/ClusterInfo" - - LocalNodeState: - description: "Current local status of this node." - type: "string" - default: "" - enum: - - "" - - "inactive" - - "pending" - - "active" - - "error" - - "locked" - example: "active" - - PeerNode: - description: "Represents a peer-node in the swarm" - properties: - NodeID: - description: "Unique identifier of for this node in the swarm." - type: "string" - Addr: - description: | - IP address and ports at which this node can be reached. - type: "string" - -paths: - /containers/json: - get: - summary: "List containers" - description: | - Returns a list of containers. For details on the format, see [the inspect endpoint](#operation/ContainerInspect). - - Note that it uses a different, smaller representation of a container than inspecting a single container. 
For example, - the list of linked containers is not propagated . - operationId: "ContainerList" - produces: - - "application/json" - parameters: - - name: "all" - in: "query" - description: "Return all containers. By default, only running containers are shown" - type: "boolean" - default: false - - name: "limit" - in: "query" - description: "Return this number of most recently created containers, including non-running ones." - type: "integer" - - name: "size" - in: "query" - description: "Return the size of container as fields `SizeRw` and `SizeRootFs`." - type: "boolean" - default: false - - name: "filters" - in: "query" - description: | - Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. Available filters: - - - `ancestor`=(`[:]`, ``, or ``) - - `before`=(`` or ``) - - `expose`=(`[/]`|`/[]`) - - `exited=` containers with exit code of `` - - `health`=(`starting`|`healthy`|`unhealthy`|`none`) - - `id=` a container's ID - - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) - - `is-task=`(`true`|`false`) - - `label=key` or `label="key=value"` of a container label - - `name=` a container's name - - `network`=(`` or ``) - - `publish`=(`[/]`|`/[]`) - - `since`=(`` or ``) - - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) - - `volume`=(`` or ``) - type: "string" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/ContainerSummary" - examples: - application/json: - - Id: "8dfafdbc3a40" - Names: - - "/boring_feynman" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 1" - Created: 1367854155 - State: "Exited" - Status: "Exit 0" - Ports: - - PrivatePort: 2222 - PublicPort: 3333 - Type: "tcp" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:02" - Mounts: - - Name: "fac362...80535" - Source: "/data" - Destination: "/data" - Driver: "local" - Mode: "ro,Z" - RW: false - Propagation: "" - - Id: "9cd87474be90" - Names: - - "/coolName" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 222222" - Created: 1367854155 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.8" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:08" - Mounts: [] - - Id: "3176a2479c92" - Names: - - "/sleepy_dog" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 3333333333333333" - Created: 1367854154 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - 
SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.6" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:06" - Mounts: [] - - Id: "4cb07b47f9fb" - Names: - - "/running_cat" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 444444444444444444444444444444444" - Created: 1367854152 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.5" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:05" - Mounts: [] - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /containers/create: - post: - summary: "Create a container" - operationId: "ContainerCreate" - consumes: - - "application/json" - - "application/octet-stream" - produces: - - "application/json" - parameters: - - name: "name" - in: "query" - description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`." - type: "string" - pattern: "/?[a-zA-Z0-9_-]+" - - name: "body" - in: "body" - description: "Container to create" - schema: - allOf: - - $ref: "#/definitions/ContainerConfig" - - type: "object" - properties: - HostConfig: - $ref: "#/definitions/HostConfig" - NetworkingConfig: - description: "This container's networking configuration." - type: "object" - properties: - EndpointsConfig: - description: "A mapping of network name to endpoint configuration for that network." 
- type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - example: - Hostname: "" - Domainname: "" - User: "" - AttachStdin: false - AttachStdout: true - AttachStderr: true - Tty: false - OpenStdin: false - StdinOnce: false - Env: - - "FOO=bar" - - "BAZ=quux" - Cmd: - - "date" - Entrypoint: "" - Image: "ubuntu" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - Volumes: - /volumes/data: {} - WorkingDir: "" - NetworkDisabled: false - MacAddress: "12:34:56:78:9a:bc" - ExposedPorts: - 22/tcp: {} - StopSignal: "SIGTERM" - StopTimeout: 10 - HostConfig: - Binds: - - "/tmp:/tmp" - Links: - - "redis3:redis" - Memory: 0 - MemorySwap: 0 - MemoryReservation: 0 - KernelMemory: 0 - NanoCPUs: 500000 - CpuPercent: 80 - CpuShares: 512 - CpuPeriod: 100000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - CpuQuota: 50000 - CpusetCpus: "0,1" - CpusetMems: "0,1" - MaximumIOps: 0 - MaximumIOBps: 0 - BlkioWeight: 300 - BlkioWeightDevice: - - {} - BlkioDeviceReadBps: - - {} - BlkioDeviceReadIOps: - - {} - BlkioDeviceWriteBps: - - {} - BlkioDeviceWriteIOps: - - {} - MemorySwappiness: 60 - OomKillDisable: false - OomScoreAdj: 500 - PidMode: "" - PidsLimit: -1 - PortBindings: - 22/tcp: - - HostPort: "11022" - PublishAllPorts: false - Privileged: false - ReadonlyRootfs: false - Dns: - - "8.8.8.8" - DnsOptions: - - "" - DnsSearch: - - "" - VolumesFrom: - - "parent" - - "other:ro" - CapAdd: - - "NET_ADMIN" - CapDrop: - - "MKNOD" - GroupAdd: - - "newgroup" - RestartPolicy: - Name: "" - MaximumRetryCount: 0 - AutoRemove: true - NetworkMode: "bridge" - Devices: [] - Ulimits: - - {} - LogConfig: - Type: "json-file" - Config: {} - SecurityOpt: [] - StorageOpt: {} - CgroupParent: "" - VolumeDriver: "" - ShmSize: 67108864 - NetworkingConfig: - EndpointsConfig: - isolated_nw: - IPAMConfig: - IPv4Address: "172.20.30.33" - IPv6Address: "2001:db8:abcd::3033" - LinkLocalIPs: - - "169.254.34.68" - - "fe80::3468" - Links: - - "container_1" - - "container_2" - Aliases: - - "server_x" - - "server_y" - - required: true - responses: - 201: - description: "Container created successfully" - schema: - type: "object" - title: "ContainerCreateResponse" - description: "OK response to ContainerCreate operation" - required: [Id, Warnings] - properties: - Id: - description: "The ID of the created container" - type: "string" - x-nullable: false - Warnings: - description: "Warnings encountered when creating the container" - type: "array" - x-nullable: false - items: - type: "string" - examples: - application/json: - Id: "e90e34656806" - Warnings: [] - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /containers/{id}/json: - get: - summary: "Inspect a container" - description: "Return low-level information about a container." 
- operationId: "ContainerInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "ContainerInspectResponse" - properties: - Id: - description: "The ID of the container" - type: "string" - Created: - description: "The time the container was created" - type: "string" - Path: - description: "The path to the command being run" - type: "string" - Args: - description: "The arguments to the command being run" - type: "array" - items: - type: "string" - State: - description: "The state of the container." - type: "object" - properties: - Status: - description: | - The status of the container. For example, `"running"` or `"exited"`. - type: "string" - enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] - Running: - description: | - Whether this container is running. - - Note that a running container can be _paused_. The `Running` and `Paused` - booleans are not mutually exclusive: - - When pausing a container (on Linux), the cgroups freezer is used to suspend - all processes in the container. Freezing the process requires the process to - be running. As a result, paused containers are both `Running` _and_ `Paused`. - - Use the `Status` field instead to determine if a container's state is "running". - type: "boolean" - Paused: - description: "Whether this container is paused." - type: "boolean" - Restarting: - description: "Whether this container is restarting." - type: "boolean" - OOMKilled: - description: "Whether this container has been killed because it ran out of memory." - type: "boolean" - Dead: - type: "boolean" - Pid: - description: "The process ID of this container" - type: "integer" - ExitCode: - description: "The last exit code of this container" - type: "integer" - Error: - type: "string" - StartedAt: - description: "The time when this container was last started." - type: "string" - FinishedAt: - description: "The time when this container last exited." - type: "string" - Image: - description: "The container's image" - type: "string" - ResolvConfPath: - type: "string" - HostnamePath: - type: "string" - HostsPath: - type: "string" - LogPath: - type: "string" - Node: - description: "TODO" - type: "object" - Name: - type: "string" - RestartCount: - type: "integer" - Driver: - type: "string" - MountLabel: - type: "string" - ProcessLabel: - type: "string" - AppArmorProfile: - type: "string" - ExecIDs: - type: "string" - HostConfig: - $ref: "#/definitions/HostConfig" - GraphDriver: - $ref: "#/definitions/GraphDriverData" - SizeRw: - description: "The size of files that have been created or changed by this container." - type: "integer" - format: "int64" - SizeRootFs: - description: "The total size of all the files in this container." 
- type: "integer" - format: "int64" - Mounts: - type: "array" - items: - $ref: "#/definitions/MountPoint" - Config: - $ref: "#/definitions/ContainerConfig" - NetworkSettings: - $ref: "#/definitions/NetworkSettings" - examples: - application/json: - AppArmorProfile: "" - Args: - - "-c" - - "exit 9" - Config: - AttachStderr: true - AttachStdin: false - AttachStdout: true - Cmd: - - "/bin/sh" - - "-c" - - "exit 9" - Domainname: "" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Hostname: "ba033ac44011" - Image: "ubuntu" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - MacAddress: "" - NetworkDisabled: false - OpenStdin: false - StdinOnce: false - Tty: false - User: "" - Volumes: - /volumes/data: {} - WorkingDir: "" - StopSignal: "SIGTERM" - StopTimeout: 10 - Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" - HostConfig: - MaximumIOps: 0 - MaximumIOBps: 0 - BlkioWeight: 0 - BlkioWeightDevice: - - {} - BlkioDeviceReadBps: - - {} - BlkioDeviceWriteBps: - - {} - BlkioDeviceReadIOps: - - {} - BlkioDeviceWriteIOps: - - {} - ContainerIDFile: "" - CpusetCpus: "" - CpusetMems: "" - CpuPercent: 80 - CpuShares: 0 - CpuPeriod: 100000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - Devices: [] - IpcMode: "" - LxcConf: [] - Memory: 0 - MemorySwap: 0 - MemoryReservation: 0 - KernelMemory: 0 - OomKillDisable: false - OomScoreAdj: 500 - NetworkMode: "bridge" - PidMode: "" - PortBindings: {} - Privileged: false - ReadonlyRootfs: false - PublishAllPorts: false - RestartPolicy: - MaximumRetryCount: 2 - Name: "on-failure" - LogConfig: - Type: "json-file" - Sysctls: - net.ipv4.ip_forward: "1" - Ulimits: - - {} - VolumeDriver: "" - ShmSize: 67108864 - HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" - HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" - LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" - Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" - Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" - MountLabel: "" - Name: "/boring_euclid" - NetworkSettings: - Bridge: "" - SandboxID: "" - HairpinMode: false - LinkLocalIPv6Address: "" - LinkLocalIPv6PrefixLen: 0 - SandboxKey: "" - EndpointID: "" - Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - IPAddress: "" - IPPrefixLen: 0 - IPv6Gateway: "" - MacAddress: "" - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:12:00:02" - Path: "/bin/sh" - ProcessLabel: "" - ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" - RestartCount: 1 - State: - Error: "" - ExitCode: 9 - FinishedAt: "2015-01-06T15:47:32.080254511Z" - OOMKilled: false - Dead: false - Paused: false - Pid: 0 - Restarting: false - Running: true - StartedAt: "2015-01-06T15:47:32.072697474Z" - Status: "running" - Mounts: - - Name: "fac362...80535" - Source: "/data" - Destination: "/data" - Driver: "local" - Mode: "ro,Z" - RW: false - 
Propagation: "" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "size" - in: "query" - type: "boolean" - default: false - description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" - tags: ["Container"] - /containers/{id}/top: - get: - summary: "List processes running inside a container" - description: "On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows." - operationId: "ContainerTop" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "ContainerTopResponse" - description: "OK response to ContainerTop operation" - properties: - Titles: - description: "The ps column titles" - type: "array" - items: - type: "string" - Processes: - description: "Each process running in the container, where each is process is an array of values corresponding to the titles" - type: "array" - items: - type: "array" - items: - type: "string" - examples: - application/json: - Titles: - - "UID" - - "PID" - - "PPID" - - "C" - - "STIME" - - "TTY" - - "TIME" - - "CMD" - Processes: - - - - "root" - - "13642" - - "882" - - "0" - - "17:03" - - "pts/0" - - "00:00:00" - - "/bin/bash" - - - - "root" - - "13735" - - "13642" - - "0" - - "17:06" - - "pts/0" - - "00:00:00" - - "sleep 10" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "ps_args" - in: "query" - description: "The arguments to pass to `ps`. For example, `aux`" - type: "string" - default: "-ef" - tags: ["Container"] - /containers/{id}/logs: - get: - summary: "Get container logs" - description: | - Get `stdout` and `stderr` logs from a container. - - Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. - operationId: "ContainerLogs" - responses: - 101: - description: "logs returned as a stream" - schema: - type: "string" - format: "binary" - 200: - description: "logs returned as a string in response body" - schema: - type: "string" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "follow" - in: "query" - description: | - Return the logs as a stream. - - This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). 
- type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "until" - in: "query" - description: "Only return logs before this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." - type: "string" - default: "all" - tags: ["Container"] - /containers/{id}/changes: - get: - summary: "Get changes on a container’s filesystem" - description: | - Returns which files in a container's filesystem have been added, deleted, - or modified. The `Kind` of modification can be one of: - - - `0`: Modified - - `1`: Added - - `2`: Deleted - operationId: "ContainerChanges" - produces: ["application/json"] - responses: - 200: - description: "The list of changes" - schema: - type: "array" - items: - type: "object" - x-go-name: "ContainerChangeResponseItem" - title: "ContainerChangeResponseItem" - description: "change item in response to ContainerChanges operation" - required: [Path, Kind] - properties: - Path: - description: "Path to file that has changed" - type: "string" - x-nullable: false - Kind: - description: "Kind of change" - type: "integer" - format: "uint8" - enum: [0, 1, 2] - x-nullable: false - examples: - application/json: - - Path: "/dev" - Kind: 0 - - Path: "/dev/kmsg" - Kind: 1 - - Path: "/test" - Kind: 1 - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/export: - get: - summary: "Export a container" - description: "Export the contents of a container as a tarball." - operationId: "ContainerExport" - produces: - - "application/octet-stream" - responses: - 200: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/stats: - get: - summary: "Get container stats based on resource usage" - description: | - This endpoint returns a live stream of a container’s resource usage - statistics. - - The `precpu_stats` is the CPU statistic of last read, which is used - for calculating the CPU usage percentage. It is not the same as the - `cpu_stats` field. - - If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is - nil then for compatibility with older daemons the length of the - corresponding `cpu_usage.percpu_usage` array should be used. 
- operationId: "ContainerStats" - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - type: "object" - examples: - application/json: - read: "2015-01-08T22:57:31.547920715Z" - pids_stats: - current: 3 - networks: - eth0: - rx_bytes: 5338 - rx_dropped: 0 - rx_errors: 0 - rx_packets: 36 - tx_bytes: 648 - tx_dropped: 0 - tx_errors: 0 - tx_packets: 8 - eth5: - rx_bytes: 4641 - rx_dropped: 0 - rx_errors: 0 - rx_packets: 26 - tx_bytes: 690 - tx_dropped: 0 - tx_errors: 0 - tx_packets: 9 - memory_stats: - stats: - total_pgmajfault: 0 - cache: 0 - mapped_file: 0 - total_inactive_file: 0 - pgpgout: 414 - rss: 6537216 - total_mapped_file: 0 - writeback: 0 - unevictable: 0 - pgpgin: 477 - total_unevictable: 0 - pgmajfault: 0 - total_rss: 6537216 - total_rss_huge: 6291456 - total_writeback: 0 - total_inactive_anon: 0 - rss_huge: 6291456 - hierarchical_memory_limit: 67108864 - total_pgfault: 964 - total_active_file: 0 - active_anon: 6537216 - total_active_anon: 6537216 - total_pgpgout: 414 - total_cache: 0 - inactive_anon: 0 - active_file: 0 - pgfault: 964 - inactive_file: 0 - total_pgpgin: 477 - max_usage: 6651904 - usage: 6537216 - failcnt: 0 - limit: 67108864 - blkio_stats: {} - cpu_stats: - cpu_usage: - percpu_usage: - - 8646879 - - 24472255 - - 36438778 - - 30657443 - usage_in_usermode: 50000000 - total_usage: 100215355 - usage_in_kernelmode: 30000000 - system_cpu_usage: 739306590000000 - online_cpus: 4 - throttling_data: - periods: 0 - throttled_periods: 0 - throttled_time: 0 - precpu_stats: - cpu_usage: - percpu_usage: - - 8646879 - - 24350896 - - 36438778 - - 30657443 - usage_in_usermode: 50000000 - total_usage: 100093996 - usage_in_kernelmode: 30000000 - system_cpu_usage: 9492140000000 - online_cpus: 4 - throttling_data: - periods: 0 - throttled_periods: 0 - throttled_time: 0 - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "stream" - in: "query" - description: "Stream the output. If false, the stats will be output once and then it will disconnect." - type: "boolean" - default: true - tags: ["Container"] - /containers/{id}/resize: - post: - summary: "Resize a container TTY" - description: "Resize the TTY for a container. You must restart the container for the resize to take effect." 
- operationId: "ContainerResize" - consumes: - - "application/octet-stream" - produces: - - "text/plain" - responses: - 200: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "cannot resize container" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "h" - in: "query" - description: "Height of the tty session in characters" - type: "integer" - - name: "w" - in: "query" - description: "Width of the tty session in characters" - type: "integer" - tags: ["Container"] - /containers/{id}/start: - post: - summary: "Start a container" - operationId: "ContainerStart" - responses: - 204: - description: "no error" - 304: - description: "container already started" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." - type: "string" - tags: ["Container"] - /containers/{id}/stop: - post: - summary: "Stop a container" - operationId: "ContainerStop" - responses: - 204: - description: "no error" - 304: - description: "container already stopped" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "t" - in: "query" - description: "Number of seconds to wait before killing the container" - type: "integer" - tags: ["Container"] - /containers/{id}/restart: - post: - summary: "Restart a container" - operationId: "ContainerRestart" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "t" - in: "query" - description: "Number of seconds to wait before killing the container" - type: "integer" - tags: ["Container"] - /containers/{id}/kill: - post: - summary: "Kill a container" - description: "Send a POSIX signal to a container, defaulting to killing to the container." 
- operationId: "ContainerKill" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "container is not running" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "signal" - in: "query" - description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" - type: "string" - default: "SIGKILL" - tags: ["Container"] - /containers/{id}/update: - post: - summary: "Update a container" - description: "Change various configuration options of a container without having to recreate it." - operationId: "ContainerUpdate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "The container has been updated." - schema: - type: "object" - title: "ContainerUpdateResponse" - description: "OK response to ContainerUpdate operation" - properties: - Warnings: - type: "array" - items: - type: "string" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "update" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/Resources" - - type: "object" - properties: - RestartPolicy: - $ref: "#/definitions/RestartPolicy" - example: - BlkioWeight: 300 - CpuShares: 512 - CpuPeriod: 100000 - CpuQuota: 50000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - CpusetCpus: "0,1" - CpusetMems: "0" - Memory: 314572800 - MemorySwap: 514288000 - MemoryReservation: 209715200 - KernelMemory: 52428800 - RestartPolicy: - MaximumRetryCount: 4 - Name: "on-failure" - tags: ["Container"] - /containers/{id}/rename: - post: - summary: "Rename a container" - operationId: "ContainerRename" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "name already in use" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "name" - in: "query" - required: true - description: "New name for the container" - type: "string" - tags: ["Container"] - /containers/{id}/pause: - post: - summary: "Pause a container" - description: | - Use the cgroups freezer to suspend all processes in a container. - - Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. 
- operationId: "ContainerPause" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/unpause: - post: - summary: "Unpause a container" - description: "Resume a container which has been paused." - operationId: "ContainerUnpause" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/attach: - post: - summary: "Attach to a container" - description: | - Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached. - - Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. - - See [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. - - ### Hijacking - - This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. - - This is the response from the daemon for an attach request: - - ``` - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - [STREAM] - ``` - - After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. - - To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. - - For example, the client sends this request to upgrade the connection: - - ``` - POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 - Upgrade: tcp - Connection: Upgrade - ``` - - The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: - - ``` - HTTP/1.1 101 UPGRADED - Content-Type: application/vnd.docker.raw-stream - Connection: Upgrade - Upgrade: tcp - - [STREAM] - ``` - - ### Stream format - - When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. - - The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). - - It is encoded on the first eight bytes like this: - - ```go - header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} - ``` - - `STREAM_TYPE` can be: - - - 0: `stdin` (is written on `stdout`) - - 1: `stdout` - - 2: `stderr` - - `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. - - Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. 
- - The simplest way to implement this protocol is the following: - - 1. Read 8 bytes. - 2. Choose `stdout` or `stderr` depending on the first byte. - 3. Extract the frame size from the last four bytes. - 4. Read the extracted size and output it on the correct output. - 5. Goto 1. - - ### Stream format when using a TTY - - When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`. - - operationId: "ContainerAttach" - produces: - - "application/vnd.docker.raw-stream" - responses: - 101: - description: "no error, hints proxy about hijacking" - 200: - description: "no error, no upgrade header found" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: "Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." - type: "string" - - name: "logs" - in: "query" - description: | - Replay previous logs from the container. - - This is useful for attaching to a container that has started and you want to output everything since the container started. - - If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output. - type: "boolean" - default: false - - name: "stream" - in: "query" - description: "Stream attached streams from the time the request was made onwards" - type: "boolean" - default: false - - name: "stdin" - in: "query" - description: "Attach to `stdin`" - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Attach to `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Attach to `stderr`" - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/attach/ws: - get: - summary: "Attach to a container via a websocket" - operationId: "ContainerAttachWebsocket" - responses: - 101: - description: "no error, hints proxy about hijacking" - 200: - description: "no error, no upgrade header found" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: "Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`." 
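To make the demultiplexing steps above concrete, here is a small Go sketch that reads one frame at a time from a hijacked, non-TTY attach stream and copies each payload to the corresponding output; the `demux` helper and its `src`/`stdout`/`stderr` parameters are illustrative names, not part of the API.

```go
package attachdemo

import (
	"encoding/binary"
	"fmt"
	"io"
)

// demux reads multiplexed frames from src (the hijacked, non-TTY stream)
// and writes each payload to stdout or stderr based on the STREAM_TYPE
// byte, following the 8-byte header layout described above.
func demux(src io.Reader, stdout, stderr io.Writer) error {
	var header [8]byte
	for {
		// 1. Read the 8-byte frame header.
		if _, err := io.ReadFull(src, header[:]); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}

		// 2. Choose the destination from the first byte (STREAM_TYPE).
		var dst io.Writer
		switch header[0] {
		case 0, 1: // stdin (written on stdout) and stdout
			dst = stdout
		case 2: // stderr
			dst = stderr
		default:
			return fmt.Errorf("unknown stream type %d", header[0])
		}

		// 3. Extract the big-endian uint32 frame size from the last four bytes.
		size := binary.BigEndian.Uint32(header[4:])

		// 4. Copy exactly that many payload bytes to the chosen output.
		if _, err := io.CopyN(dst, src, int64(size)); err != nil {
			return err
		}
		// 5. Loop back for the next frame.
	}
}
```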
- type: "string" - - name: "logs" - in: "query" - description: "Return logs" - type: "boolean" - default: false - - name: "stream" - in: "query" - description: "Return stream" - type: "boolean" - default: false - - name: "stdin" - in: "query" - description: "Attach to `stdin`" - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Attach to `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Attach to `stderr`" - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/wait: - post: - summary: "Wait for a container" - description: "Block until a container stops, then returns the exit code." - operationId: "ContainerWait" - produces: ["application/json"] - responses: - 200: - description: "The container has exit." - schema: - type: "object" - title: "ContainerWaitResponse" - description: "OK response to ContainerWait operation" - required: [StatusCode] - properties: - StatusCode: - description: "Exit code of the container" - type: "integer" - x-nullable: false - Error: - description: "container waiting error, if any" - type: "object" - properties: - Message: - description: "Details of an error" - type: "string" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "condition" - in: "query" - description: "Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'." - type: "string" - default: "not-running" - tags: ["Container"] - /containers/{id}: - delete: - summary: "Remove a container" - operationId: "ContainerDelete" - responses: - 204: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "conflict" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or force remove" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "v" - in: "query" - description: "Remove the volumes associated with the container." - type: "boolean" - default: false - - name: "force" - in: "query" - description: "If the container is running, kill it before removing it." - type: "boolean" - default: false - - name: "link" - in: "query" - description: "Remove the specified link associated with the container." - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/archive: - head: - summary: "Get information about files in a container" - description: "A response header `X-Docker-Container-Path-Stat` is return containing a base64 - encoded JSON object with some filesystem header information about the path." 
- operationId: "ContainerArchiveInfo" - responses: - 200: - description: "no error" - headers: - X-Docker-Container-Path-Stat: - type: "string" - description: "TODO" - 400: - description: "Bad parameter" - schema: - allOf: - - $ref: "#/definitions/ErrorResponse" - - type: "object" - properties: - message: - description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." - type: "string" - x-nullable: false - 404: - description: "Container or path does not exist" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Resource in the container’s filesystem to archive." - type: "string" - tags: ["Container"] - get: - summary: "Get an archive of a filesystem resource in a container" - description: "Get a tar archive of a resource in the filesystem of container id." - operationId: "ContainerArchive" - produces: ["application/x-tar"] - responses: - 200: - description: "no error" - 400: - description: "Bad parameter" - schema: - allOf: - - $ref: "#/definitions/ErrorResponse" - - type: "object" - properties: - message: - description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." - type: "string" - x-nullable: false - 404: - description: "Container or path does not exist" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Resource in the container’s filesystem to archive." - type: "string" - tags: ["Container"] - put: - summary: "Extract an archive of files or folders to a directory in a container" - description: "Upload a tar archive to be extracted to a path in the filesystem of container id." - operationId: "PutContainerArchive" - consumes: ["application/x-tar", "application/octet-stream"] - responses: - 200: - description: "The content was extracted successfully" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 403: - description: "Permission denied, the volume or container rootfs is marked as read-only." - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "No such container or path does not exist inside the container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Path to a directory in the container to extract the archive’s contents into. 
" - type: "string" - - name: "noOverwriteDirNonDir" - in: "query" - description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa." - type: "string" - - name: "inputStream" - in: "body" - required: true - description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." - schema: - type: "string" - tags: ["Container"] - /containers/prune: - post: - summary: "Delete stopped containers" - produces: - - "application/json" - operationId: "ContainerPrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - - `until=` Prune containers created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune containers with (or without, in case `label!=...` is used) the specified labels. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "ContainerPruneResponse" - properties: - ContainersDeleted: - description: "Container IDs that were deleted" - type: "array" - items: - type: "string" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /images/json: - get: - summary: "List Images" - description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." - operationId: "ImageList" - produces: - - "application/json" - responses: - 200: - description: "Summary image data for the images matching the query" - schema: - type: "array" - items: - $ref: "#/definitions/ImageSummary" - examples: - application/json: - - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" - ParentId: "" - RepoTags: - - "ubuntu:12.04" - - "ubuntu:precise" - RepoDigests: - - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" - Created: 1474925151 - Size: 103579269 - VirtualSize: 103579269 - SharedSize: 0 - Labels: {} - Containers: 2 - - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" - ParentId: "" - RepoTags: - - "ubuntu:12.10" - - "ubuntu:quantal" - RepoDigests: - - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" - - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" - Created: 1403128455 - Size: 172064416 - VirtualSize: 172064416 - SharedSize: 0 - Labels: {} - Containers: 5 - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "all" - in: "query" - description: "Show all images. Only images from a final layer (no children) are shown by default." - type: "boolean" - default: false - - name: "filters" - in: "query" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. 
Available filters: - - - `before`=(`[:]`, `` or ``) - - `dangling=true` - - `label=key` or `label="key=value"` of an image label - - `reference`=(`[:]`) - - `since`=(`[:]`, `` or ``) - type: "string" - - name: "digests" - in: "query" - description: "Show digest information as a `RepoDigests` field on each image." - type: "boolean" - default: false - tags: ["Image"] - /build: - post: - summary: "Build an image" - description: | - Build an image from a tar archive with a `Dockerfile` in it. - - The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). - - The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. - - The build is canceled if the client drops the connection by quitting or being killed. - operationId: "ImageBuild" - consumes: - - "application/octet-stream" - produces: - - "application/json" - parameters: - - name: "inputStream" - in: "body" - description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." - schema: - type: "string" - format: "binary" - - name: "dockerfile" - in: "query" - description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." - type: "string" - default: "Dockerfile" - - name: "t" - in: "query" - description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." - type: "string" - - name: "extrahosts" - in: "query" - description: "Extra hosts to add to /etc/hosts" - type: "string" - - name: "remote" - in: "query" - description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." - type: "string" - - name: "q" - in: "query" - description: "Suppress verbose build output." - type: "boolean" - default: false - - name: "nocache" - in: "query" - description: "Do not use the cache when building the image." - type: "boolean" - default: false - - name: "cachefrom" - in: "query" - description: "JSON array of images used for build cache resolution." - type: "string" - - name: "pull" - in: "query" - description: "Attempt to pull the image even if an older image exists locally." - type: "string" - - name: "rm" - in: "query" - description: "Remove intermediate containers after a successful build." - type: "boolean" - default: true - - name: "forcerm" - in: "query" - description: "Always remove intermediate containers, even upon failure." - type: "boolean" - default: false - - name: "memory" - in: "query" - description: "Set memory limit for build." - type: "integer" - - name: "memswap" - in: "query" - description: "Total memory (memory + swap). Set as `-1` to disable swap." 
- type: "integer" - - name: "cpushares" - in: "query" - description: "CPU shares (relative weight)." - type: "integer" - - name: "cpusetcpus" - in: "query" - description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." - type: "string" - - name: "cpuperiod" - in: "query" - description: "The length of a CPU period in microseconds." - type: "integer" - - name: "cpuquota" - in: "query" - description: "Microseconds of CPU time that the container can get in a CPU period." - type: "integer" - - name: "buildargs" - in: "query" - description: > - JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker - uses the buildargs as the environment context for commands run via the `Dockerfile` RUN - instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for - passing secret values. - - - For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the - the query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. - - - [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) - type: "string" - - name: "shmsize" - in: "query" - description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." - type: "integer" - - name: "squash" - in: "query" - description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" - type: "boolean" - - name: "labels" - in: "query" - description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." - type: "string" - - name: "networkmode" - in: "query" - description: "Sets the networking mode for the run commands during - build. Supported standard values are: `bridge`, `host`, `none`, and - `container:`. Any other value is taken as a custom network's - name to which this container should connect to." - type: "string" - - name: "Content-type" - in: "header" - type: "string" - enum: - - "application/x-tar" - default: "application/x-tar" - - name: "X-Registry-Config" - in: "header" - description: | - This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. - - The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: - - ``` - { - "docker.example.com": { - "username": "janedoe", - "password": "hunter2" - }, - "https://index.docker.io/v1/": { - "username": "mobydock", - "password": "conta1n3rize14" - } - } - ``` - - Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. 
- type: "string" - - name: "platform" - in: "query" - description: "Platform in the format os[/arch[/variant]]" - type: "string" - default: "" - - name: "target" - in: "query" - description: "Target build stage" - type: "string" - default: "" - responses: - 200: - description: "no error" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Image"] - /build/prune: - post: - summary: "Delete builder cache" - produces: - - "application/json" - operationId: "BuildPrune" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "BuildPruneResponse" - properties: - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Image"] - /images/create: - post: - summary: "Create an image" - description: "Create an image by either pulling it from a registry or importing it." - operationId: "ImageCreate" - consumes: - - "text/plain" - - "application/octet-stream" - produces: - - "application/json" - responses: - 200: - description: "no error" - 404: - description: "repository does not exist or no read access" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "fromImage" - in: "query" - description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." - type: "string" - - name: "fromSrc" - in: "query" - description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." - type: "string" - - name: "repo" - in: "query" - description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." - type: "string" - - name: "tag" - in: "query" - description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." - type: "string" - - name: "inputImage" - in: "body" - description: "Image content if the value `-` has been specified in fromSrc query parameter" - schema: - type: "string" - required: false - - name: "X-Registry-Auth" - in: "header" - description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" - type: "string" - - name: "platform" - in: "query" - description: "Platform in the format os[/arch[/variant]]" - type: "string" - default: "" - tags: ["Image"] - /images/{name}/json: - get: - summary: "Inspect an image" - description: "Return low-level information about an image." 
- operationId: "ImageInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/Image" - examples: - application/json: - Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" - Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" - Comment: "" - Os: "linux" - Architecture: "amd64" - Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" - ContainerConfig: - Tty: false - Hostname: "e611e15f9c9d" - Domainname: "" - AttachStdout: false - PublishService: "" - AttachStdin: false - OpenStdin: false - StdinOnce: false - NetworkDisabled: false - OnBuild: [] - Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" - User: "" - WorkingDir: "" - MacAddress: "" - AttachStderr: false - Labels: - com.example.license: "GPL" - com.example.version: "1.0" - com.example.vendor: "Acme" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Cmd: - - "/bin/sh" - - "-c" - - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" - DockerVersion: "1.9.0-dev" - VirtualSize: 188359297 - Size: 0 - Author: "" - Created: "2015-09-10T08:30:53.26995814Z" - GraphDriver: - Name: "aufs" - Data: {} - RepoDigests: - - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" - RepoTags: - - "example:1.0" - - "example:latest" - - "example:stable" - Config: - Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" - NetworkDisabled: false - OnBuild: [] - StdinOnce: false - PublishService: "" - AttachStdin: false - OpenStdin: false - Domainname: "" - AttachStdout: false - Tty: false - Hostname: "e611e15f9c9d" - Cmd: - - "/bin/bash" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Labels: - com.example.vendor: "Acme" - com.example.version: "1.0" - com.example.license: "GPL" - MacAddress: "" - AttachStderr: false - WorkingDir: "" - User: "" - RootFS: - Type: "layers" - Layers: - - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" - - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such image: someimage (tag: latest)" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or id" - type: "string" - required: true - tags: ["Image"] - /images/{name}/history: - get: - summary: "Get the history of an image" - description: "Return parent layers of an image." 
- operationId: "ImageHistory" - produces: ["application/json"] - responses: - 200: - description: "List of image layers" - schema: - type: "array" - items: - type: "object" - x-go-name: HistoryResponseItem - title: "HistoryResponseItem" - description: "individual image layer information in response to ImageHistory operation" - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false - examples: - application/json: - - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" - Created: 1398108230 - CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" - Tags: - - "ubuntu:lucid" - - "ubuntu:10.04" - Size: 182964289 - Comment: "" - - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" - Created: 1398108222 - CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" - Tags: [] - Size: 0 - Comment: "" - - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" - Created: 1371157430 - CreatedBy: "" - Tags: - - "scratch12:latest" - - "scratch:latest" - Size: 0 - Comment: "Imported from -" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - tags: ["Image"] - /images/{name}/push: - post: - summary: "Push an image" - description: | - Push an image to a registry. - - If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. - - The push is cancelled if the HTTP connection is closed. - operationId: "ImagePush" - consumes: - - "application/octet-stream" - responses: - 200: - description: "No error" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID." - type: "string" - required: true - - name: "tag" - in: "query" - description: "The tag to associate with the image on the registry." - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" - type: "string" - required: true - tags: ["Image"] - /images/{name}/tag: - post: - summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." 
- operationId: "ImageTag" - responses: - 201: - description: "No error" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID to tag." - type: "string" - required: true - - name: "repo" - in: "query" - description: "The repository to tag in. For example, `someuser/someimage`." - type: "string" - - name: "tag" - in: "query" - description: "The name of the new tag." - type: "string" - tags: ["Image"] - /images/{name}: - delete: - summary: "Remove an image" - description: | - Remove an image, along with any untagged parent images that were - referenced by that image. - - Images can't be removed if they have descendant images, are being - used by a running container or are being used by a build. - operationId: "ImageDelete" - produces: ["application/json"] - responses: - 200: - description: "The image was deleted successfully" - schema: - type: "array" - items: - $ref: "#/definitions/ImageDeleteResponseItem" - examples: - application/json: - - Untagged: "3e2f21a89f" - - Deleted: "3e2f21a89f" - - Deleted: "53b4f83ac9" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - - name: "force" - in: "query" - description: "Remove the image even if it is being used by stopped containers or has other tags" - type: "boolean" - default: false - - name: "noprune" - in: "query" - description: "Do not delete untagged parent images" - type: "boolean" - default: false - tags: ["Image"] - /images/search: - get: - summary: "Search images" - description: "Search for an image on Docker Hub." - operationId: "ImageSearch" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "array" - items: - type: "object" - title: "ImageSearchResponseItem" - properties: - description: - type: "string" - is_official: - type: "boolean" - is_automated: - type: "boolean" - name: - type: "string" - star_count: - type: "integer" - examples: - application/json: - - description: "" - is_official: false - is_automated: false - name: "wma55/u1210sshd" - star_count: 0 - - description: "" - is_official: false - is_automated: false - name: "jdswinbank/sshd" - star_count: 0 - - description: "" - is_official: false - is_automated: false - name: "vgauthier/sshd" - star_count: 0 - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "term" - in: "query" - description: "Term to search" - type: "string" - required: true - - name: "limit" - in: "query" - description: "Maximum number of results to return" - type: "integer" - - name: "filters" - in: "query" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - - - `is-automated=(true|false)` - - `is-official=(true|false)` - - `stars=` Matches images that has at least 'number' stars. 
- type: "string" - tags: ["Image"] - /images/prune: - post: - summary: "Delete unused images" - produces: - - "application/json" - operationId: "ImagePrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - - - `dangling=` When set to `true` (or `1`), prune only - unused *and* untagged images. When set to `false` - (or `0`), all unused images are pruned. - - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "ImagePruneResponse" - properties: - ImagesDeleted: - description: "Images that were deleted" - type: "array" - items: - $ref: "#/definitions/ImageDeleteResponseItem" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Image"] - /auth: - post: - summary: "Check auth configuration" - description: "Validate credentials for a registry and, if available, get an identity token for accessing the registry without password." - operationId: "SystemAuth" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "An identity token was generated successfully." - schema: - type: "object" - title: "SystemAuthResponse" - required: [Status] - properties: - Status: - description: "The status of the authentication" - type: "string" - x-nullable: false - IdentityToken: - description: "An opaque token used to authenticate a user after a successful login" - type: "string" - x-nullable: false - examples: - application/json: - Status: "Login Succeeded" - IdentityToken: "9cbaf023786cd7..." - 204: - description: "No error" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "authConfig" - in: "body" - description: "Authentication to check" - schema: - $ref: "#/definitions/AuthConfig" - tags: ["System"] - /info: - get: - summary: "Get system information" - operationId: "SystemInfo" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/SystemInfo" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /version: - get: - summary: "Get version" - description: "Returns the version of Docker that is running and various information about the system that Docker is running on." 
- operationId: "SystemVersion" - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - type: "object" - title: "SystemVersionResponse" - properties: - Platform: - type: "object" - required: [Name] - properties: - Name: - type: "string" - Components: - type: "array" - items: - type: "object" - x-go-name: ComponentVersion - required: [Name, Version] - properties: - Name: - type: "string" - Version: - type: "string" - x-nullable: false - Details: - type: "object" - x-nullable: true - - Version: - type: "string" - ApiVersion: - type: "string" - MinAPIVersion: - type: "string" - GitCommit: - type: "string" - GoVersion: - type: "string" - Os: - type: "string" - Arch: - type: "string" - KernelVersion: - type: "string" - Experimental: - type: "boolean" - BuildTime: - type: "string" - examples: - application/json: - Version: "17.04.0" - Os: "linux" - KernelVersion: "3.19.0-23-generic" - GoVersion: "go1.7.5" - GitCommit: "deadbee" - Arch: "amd64" - ApiVersion: "1.27" - MinAPIVersion: "1.12" - BuildTime: "2016-06-14T07:09:13.444803460+00:00" - Experimental: true - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /_ping: - get: - summary: "Ping" - description: "This is a dummy endpoint you can use to test if the server is accessible." - operationId: "SystemPing" - produces: ["text/plain"] - responses: - 200: - description: "no error" - schema: - type: "string" - example: "OK" - headers: - API-Version: - type: "string" - description: "Max API Version the server supports" - Docker-Experimental: - type: "boolean" - description: "If the server is running with experimental mode enabled" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /commit: - post: - summary: "Create a new image from a container" - operationId: "ImageCommit" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "containerConfig" - in: "body" - description: "The container configuration" - schema: - $ref: "#/definitions/ContainerConfig" - - name: "container" - in: "query" - description: "The ID or name of the container to commit" - type: "string" - - name: "repo" - in: "query" - description: "Repository name for the created image" - type: "string" - - name: "tag" - in: "query" - description: "Tag name for the create image" - type: "string" - - name: "comment" - in: "query" - description: "Commit message" - type: "string" - - name: "author" - in: "query" - description: "Author of the image (e.g., `John Hannibal Smith `)" - type: "string" - - name: "pause" - in: "query" - description: "Whether to pause the container before committing" - type: "boolean" - default: true - - name: "changes" - in: "query" - description: "`Dockerfile` instructions to apply while committing" - type: "string" - tags: ["Image"] - /events: - get: - summary: "Monitor events" - description: | - Stream real-time events from the server. - - Various objects within Docker report events when something happens to them. 
- - Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, and `update` - - Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, and `untag` - - Volumes report these events: `create`, `mount`, `unmount`, and `destroy` - - Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, and `remove` - - The Docker daemon reports these events: `reload` - - Services report these events: `create`, `update`, and `remove` - - Nodes report these events: `create`, `update`, and `remove` - - Secrets report these events: `create`, `update`, and `remove` - - Configs report these events: `create`, `update`, and `remove` - - operationId: "SystemEvents" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "SystemEventsResponse" - properties: - Type: - description: "The type of object emitting the event" - type: "string" - Action: - description: "The type of event" - type: "string" - Actor: - type: "object" - properties: - ID: - description: "The ID of the object emitting the event" - type: "string" - Attributes: - description: "Various key/value attributes of the object, depending on its type" - type: "object" - additionalProperties: - type: "string" - time: - description: "Timestamp of event" - type: "integer" - timeNano: - description: "Timestamp of event, with nanosecond accuracy" - type: "integer" - format: "int64" - examples: - application/json: - Type: "container" - Action: "create" - Actor: - ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" - Attributes: - com.example.some-label: "some-label-value" - image: "alpine" - name: "my-container" - time: 1461943101 - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "since" - in: "query" - description: "Show events created since this timestamp then stream new events." - type: "string" - - name: "until" - in: "query" - description: "Show events created until this timestamp then stop streaming." - type: "string" - - name: "filters" - in: "query" - description: | - A JSON encoded value of filters (a `map[string][]string`) to process on the event list. 
Available filters: - - - `config=` config name or ID - - `container=` container name or ID - - `daemon=` daemon name or ID - - `event=` event type - - `image=` image name or ID - - `label=` image or container label - - `network=` network name or ID - - `node=` node ID - - `plugin`= plugin name or ID - - `scope`= local or swarm - - `secret=` secret name or ID - - `service=` service name or ID - - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` - - `volume=` volume name - type: "string" - tags: ["System"] - /system/df: - get: - summary: "Get data usage information" - operationId: "SystemDataUsage" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "SystemDataUsageResponse" - properties: - LayersSize: - type: "integer" - format: "int64" - Images: - type: "array" - items: - $ref: "#/definitions/ImageSummary" - Containers: - type: "array" - items: - $ref: "#/definitions/ContainerSummary" - Volumes: - type: "array" - items: - $ref: "#/definitions/Volume" - example: - LayersSize: 1092588 - Images: - - - Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" - ParentId: "" - RepoTags: - - "busybox:latest" - RepoDigests: - - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" - Created: 1466724217 - Size: 1092588 - SharedSize: 0 - VirtualSize: 1092588 - Labels: {} - Containers: 1 - Containers: - - - Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" - Names: - - "/top" - Image: "busybox" - ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" - Command: "top" - Created: 1472592424 - Ports: [] - SizeRootFs: 1092588 - Labels: {} - State: "exited" - Status: "Exited (0) 56 minutes ago" - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - IPAMConfig: null - Links: null - Aliases: null - NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" - EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" - Gateway: "172.18.0.1" - IPAddress: "172.18.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:12:00:02" - Mounts: [] - Volumes: - - - Name: "my-volume" - Driver: "local" - Mountpoint: "/var/lib/docker/volumes/my-volume/_data" - Labels: null - Scope: "local" - Options: null - UsageData: - Size: 10920104 - RefCount: 2 - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /images/{name}/get: - get: - summary: "Export an image" - description: | - Get a tarball containing all images and metadata for a repository. - - If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. - - ### Image tarball format - - An image tarball contains one directory per image layer (named using its long ID), each containing these files: - - - `VERSION`: currently `1.0` - the file format version - - `json`: detailed layer information, similar to `docker inspect layer_id` - - `layer.tar`: A tarfile containing the filesystem changes in this layer - - The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. 
- - If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. - - ```json - { - "hello-world": { - "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" - } - } - ``` - operationId: "ImageGet" - produces: - - "application/x-tar" - responses: - 200: - description: "no error" - schema: - type: "string" - format: "binary" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - tags: ["Image"] - /images/get: - get: - summary: "Export several images" - description: | - Get a tarball containing all images and metadata for several image repositories. - - For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. - - For details on the format, see [the export image endpoint](#operation/ImageGet). - operationId: "ImageGetAll" - produces: - - "application/x-tar" - responses: - 200: - description: "no error" - schema: - type: "string" - format: "binary" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "names" - in: "query" - description: "Image names to filter by" - type: "array" - items: - type: "string" - tags: ["Image"] - /images/load: - post: - summary: "Import images" - description: | - Load a set of images and tags into a repository. - - For details on the format, see [the export image endpoint](#operation/ImageGet). - operationId: "ImageLoad" - consumes: - - "application/x-tar" - produces: - - "application/json" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "imagesTarball" - in: "body" - description: "Tar archive containing images" - schema: - type: "string" - format: "binary" - - name: "quiet" - in: "query" - description: "Suppress progress details during load." - type: "boolean" - default: false - tags: ["Image"] - /containers/{id}/exec: - post: - summary: "Create an exec instance" - description: "Run a command inside a running container." - operationId: "ContainerExec" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "container is paused" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "execConfig" - in: "body" - description: "Exec configuration" - schema: - type: "object" - properties: - AttachStdin: - type: "boolean" - description: "Attach to `stdin` of the exec command." - AttachStdout: - type: "boolean" - description: "Attach to `stdout` of the exec command." - AttachStderr: - type: "boolean" - description: "Attach to `stderr` of the exec command." - DetachKeys: - type: "string" - description: "Override the key sequence for detaching a container. 
Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." - Tty: - type: "boolean" - description: "Allocate a pseudo-TTY." - Env: - description: "A list of environment variables in the form `[\"VAR=value\", ...]`." - type: "array" - items: - type: "string" - Cmd: - type: "array" - description: "Command to run, as a string or array of strings." - items: - type: "string" - Privileged: - type: "boolean" - description: "Runs the exec process with extended privileges." - default: false - User: - type: "string" - description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`." - WorkingDir: - type: "string" - description: "The working directory for the exec process inside the container." - example: - AttachStdin: false - AttachStdout: true - AttachStderr: true - DetachKeys: "ctrl-p,ctrl-q" - Tty: false - Cmd: - - "date" - Env: - - "FOO=bar" - - "BAZ=quux" - required: true - - name: "id" - in: "path" - description: "ID or name of container" - type: "string" - required: true - tags: ["Exec"] - /exec/{id}/start: - post: - summary: "Start an exec instance" - description: "Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command." - operationId: "ExecStart" - consumes: - - "application/json" - produces: - - "application/vnd.docker.raw-stream" - responses: - 200: - description: "No error" - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Container is stopped or paused" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "execStartConfig" - in: "body" - schema: - type: "object" - properties: - Detach: - type: "boolean" - description: "Detach from the command." - Tty: - type: "boolean" - description: "Allocate a pseudo-TTY." - example: - Detach: false - Tty: false - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - tags: ["Exec"] - /exec/{id}/resize: - post: - summary: "Resize an exec instance" - description: "Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance." - operationId: "ExecResize" - responses: - 201: - description: "No error" - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - - name: "h" - in: "query" - description: "Height of the TTY session in characters" - type: "integer" - - name: "w" - in: "query" - description: "Width of the TTY session in characters" - type: "integer" - tags: ["Exec"] - /exec/{id}/json: - get: - summary: "Inspect an exec instance" - description: "Return low-level information about an exec instance." 
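A hedged end-to-end sketch of the exec flow documented above: create the exec instance with `POST /containers/{id}/exec`, then start it with `POST /exec/{id}/start`. Only the standard library is used; the Unix-socket path and the placeholder `http://docker` host are assumptions about a default local daemon.

```go
// Illustrative exec create + start against a local daemon, using only the
// standard library. Endpoint paths and request payloads follow the spec above.
package execdemo

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func runDetachedExec(containerID string, cmd []string) error {
	// All requests are dialed over the daemon's Unix socket (assumed path).
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}}

	// 1. POST /containers/{id}/exec with the exec configuration.
	cfg, _ := json.Marshal(map[string]interface{}{
		"AttachStdout": true,
		"AttachStderr": true,
		"Cmd":          cmd,
	})
	resp, err := client.Post("http://docker/containers/"+containerID+"/exec",
		"application/json", bytes.NewReader(cfg))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	var created struct{ Id string }
	if err := json.NewDecoder(resp.Body).Decode(&created); err != nil {
		return err
	}

	// 2. POST /exec/{id}/start; with Detach=true the daemon returns immediately.
	start, _ := json.Marshal(map[string]bool{"Detach": true, "Tty": false})
	resp2, err := client.Post("http://docker/exec/"+created.Id+"/start",
		"application/json", bytes.NewReader(start))
	if err != nil {
		return err
	}
	defer resp2.Body.Close()
	if resp2.StatusCode != http.StatusOK {
		return fmt.Errorf("exec start: unexpected status %s", resp2.Status)
	}
	return nil
}
```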
- operationId: "ExecInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "ExecInspectResponse" - properties: - CanRemove: - type: "boolean" - DetachKeys: - type: "string" - ID: - type: "string" - Running: - type: "boolean" - ExitCode: - type: "integer" - ProcessConfig: - $ref: "#/definitions/ProcessConfig" - OpenStdin: - type: "boolean" - OpenStderr: - type: "boolean" - OpenStdout: - type: "boolean" - ContainerID: - type: "string" - Pid: - type: "integer" - description: "The system process ID for the exec process." - examples: - application/json: - CanRemove: false - ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" - DetachKeys: "" - ExitCode: 2 - ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" - OpenStderr: true - OpenStdin: true - OpenStdout: true - ProcessConfig: - arguments: - - "-c" - - "exit 2" - entrypoint: "sh" - privileged: false - tty: true - user: "1000" - Running: false - Pid: 42000 - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - tags: ["Exec"] - - /volumes: - get: - summary: "List volumes" - operationId: "VolumeList" - produces: ["application/json"] - responses: - 200: - description: "Summary volume data that matches the query" - schema: - type: "object" - title: "VolumeListResponse" - required: [Volumes, Warnings] - properties: - Volumes: - type: "array" - x-nullable: false - description: "List of volumes" - items: - $ref: "#/definitions/Volume" - Warnings: - type: "array" - x-nullable: false - description: "Warnings that occurred when fetching the list of volumes" - items: - type: "string" - - examples: - application/json: - Volumes: - - CreatedAt: "2017-07-19T12:00:26Z" - Name: "tardis" - Driver: "local" - Mountpoint: "/var/lib/docker/volumes/tardis" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Scope: "local" - Options: - device: "tmpfs" - o: "size=100m,uid=1000" - type: "tmpfs" - Warnings: [] - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - JSON encoded value of the filters (a `map[string][]string`) to - process on the volumes list. Available filters: - - - `dangling=` When set to `true` (or `1`), returns all - volumes that are not in use by a container. When set to `false` - (or `0`), only volumes that are in use by one or more - containers are returned. - - `driver=` Matches volumes based on their driver. - - `label=` or `label=:` Matches volumes based on - the presence of a `label` alone or a `label` and a value. - - `name=` Matches all or part of a volume name. 
- type: "string" - format: "json" - tags: ["Volume"] - - /volumes/create: - post: - summary: "Create a volume" - operationId: "VolumeCreate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 201: - description: "The volume was created successfully" - schema: - $ref: "#/definitions/Volume" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "volumeConfig" - in: "body" - required: true - description: "Volume configuration" - schema: - type: "object" - properties: - Name: - description: "The new volume's name. If not specified, Docker generates a name." - type: "string" - x-nullable: false - Driver: - description: "Name of the volume driver to use." - type: "string" - default: "local" - x-nullable: false - DriverOpts: - description: "A mapping of driver options and values. These options are passed directly to the driver and are driver specific." - type: "object" - additionalProperties: - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - Name: "tardis" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Driver: "custom" - tags: ["Volume"] - - /volumes/{name}: - get: - summary: "Inspect a volume" - operationId: "VolumeInspect" - produces: ["application/json"] - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/Volume" - 404: - description: "No such volume" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - required: true - description: "Volume name or ID" - type: "string" - tags: ["Volume"] - - delete: - summary: "Remove a volume" - description: "Instruct the driver to remove the volume." - operationId: "VolumeDelete" - responses: - 204: - description: "The volume was removed" - 404: - description: "No such volume or volume driver" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Volume is in use and cannot be removed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - required: true - description: "Volume name or ID" - type: "string" - - name: "force" - in: "query" - description: "Force the removal of the volume" - type: "boolean" - default: false - tags: ["Volume"] - /volumes/prune: - post: - summary: "Delete unused volumes" - produces: - - "application/json" - operationId: "VolumePrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "VolumePruneResponse" - properties: - VolumesDeleted: - description: "Volumes that were deleted" - type: "array" - items: - type: "string" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Volume"] - /networks: - get: - summary: "List networks" - description: | - Returns a list of networks. 
For details on the format, see [the network inspect endpoint](#operation/NetworkInspect). - - Note that it uses a different, smaller representation of a network than inspecting a single network. For example, - the list of containers attached to the network is not propagated in API versions 1.28 and up. - operationId: "NetworkList" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "array" - items: - $ref: "#/definitions/Network" - examples: - application/json: - - Name: "bridge" - Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" - Created: "2016-10-19T06:21:00.416543526Z" - Scope: "local" - Driver: "bridge" - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - Driver: "default" - Config: - - - Subnet: "172.17.0.0/16" - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - - Name: "none" - Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" - Created: "0001-01-01T00:00:00Z" - Scope: "local" - Driver: "null" - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - Driver: "default" - Config: [] - Containers: {} - Options: {} - - Name: "host" - Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" - Created: "0001-01-01T00:00:00Z" - Scope: "local" - Driver: "host" - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - Driver: "default" - Config: [] - Containers: {} - Options: {} - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: - - - `driver=` Matches a network's driver. - - `id=` Matches all or part of a network ID. - - `label=` or `label==` of a network label. - - `name=` Matches all or part of a network name. - - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). - - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
- type: "string" - tags: ["Network"] - - /networks/{id}: - get: - summary: "Inspect a network" - operationId: "NetworkInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/Network" - 404: - description: "Network not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "verbose" - in: "query" - description: "Detailed inspect output for troubleshooting" - type: "boolean" - default: false - - name: "scope" - in: "query" - description: "Filter the network by scope (swarm, global, or local)" - type: "string" - tags: ["Network"] - - delete: - summary: "Remove a network" - operationId: "NetworkDelete" - responses: - 204: - description: "No error" - 403: - description: "operation not supported for pre-defined networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such network" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - tags: ["Network"] - - /networks/create: - post: - summary: "Create a network" - operationId: "NetworkCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "No error" - schema: - type: "object" - title: "NetworkCreateResponse" - properties: - Id: - description: "The ID of the created network." - type: "string" - Warning: - type: "string" - example: - Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" - Warning: "" - 403: - description: "operation not supported for pre-defined networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "plugin not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "networkConfig" - in: "body" - description: "Network configuration" - required: true - schema: - type: "object" - required: ["Name"] - properties: - Name: - description: "The network's name." - type: "string" - CheckDuplicate: - description: "Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions." - type: "boolean" - Driver: - description: "Name of the network driver plugin to use." - type: "string" - default: "bridge" - Internal: - description: "Restrict external access to the network." - type: "boolean" - Attachable: - description: "Globally scoped network is manually attachable by regular containers from workers in swarm mode." - type: "boolean" - Ingress: - description: "Ingress network is the network which provides the routing-mesh in swarm mode." - type: "boolean" - IPAM: - description: "Optional custom IP scheme for the network." - $ref: "#/definitions/IPAM" - EnableIPv6: - description: "Enable IPv6 on the network." - type: "boolean" - Options: - description: "Network specific options to be used by the drivers." 
- type: "object" - additionalProperties: - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - Name: "isolated_nw" - CheckDuplicate: false - Driver: "bridge" - EnableIPv6: true - IPAM: - Driver: "default" - Config: - - Subnet: "172.20.0.0/16" - IPRange: "172.20.10.0/24" - Gateway: "172.20.10.11" - - Subnet: "2001:db8:abcd::/64" - Gateway: "2001:db8:abcd::1011" - Options: - foo: "bar" - Internal: true - Attachable: false - Ingress: false - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - tags: ["Network"] - - /networks/{id}/connect: - post: - summary: "Connect a container to a network" - operationId: "NetworkConnect" - consumes: - - "application/json" - responses: - 200: - description: "No error" - 403: - description: "Operation not supported for swarm scoped networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Network or container not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "container" - in: "body" - required: true - schema: - type: "object" - properties: - Container: - type: "string" - description: "The ID or name of the container to connect to the network." - EndpointConfig: - $ref: "#/definitions/EndpointSettings" - example: - Container: "3613f73ba0e4" - EndpointConfig: - IPAMConfig: - IPv4Address: "172.24.56.89" - IPv6Address: "2001:db8::5689" - tags: ["Network"] - - /networks/{id}/disconnect: - post: - summary: "Disconnect a container from a network" - operationId: "NetworkDisconnect" - consumes: - - "application/json" - responses: - 200: - description: "No error" - 403: - description: "Operation not supported for swarm scoped networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Network or container not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "container" - in: "body" - required: true - schema: - type: "object" - properties: - Container: - type: "string" - description: "The ID or name of the container to disconnect from the network." - Force: - type: "boolean" - description: "Force the container to disconnect from the network." - tags: ["Network"] - /networks/prune: - post: - summary: "Delete unused networks" - produces: - - "application/json" - operationId: "NetworkPrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. 
- - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "NetworkPruneResponse" - properties: - NetworksDeleted: - description: "Networks that were deleted" - type: "array" - items: - type: "string" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Network"] - /plugins: - get: - summary: "List plugins" - operationId: "PluginList" - description: "Returns information about installed plugins." - produces: ["application/json"] - responses: - 200: - description: "No error" - schema: - type: "array" - items: - $ref: "#/definitions/Plugin" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. Available filters: - - - `capability=` - - `enable=|` - tags: ["Plugin"] - - /plugins/privileges: - get: - summary: "Get plugin privileges" - operationId: "GetPluginPrivileges" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - description: "Describes a permission the user has to accept upon installing the plugin." - type: "object" - title: "PluginPrivilegeItem" - properties: - Name: - type: "string" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "remote" - in: "query" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - tags: - - "Plugin" - - /plugins/pull: - post: - summary: "Install a plugin" - operationId: "PluginPull" - description: | - Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). - produces: - - "application/json" - responses: - 204: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "remote" - in: "query" - description: | - Remote reference for plugin to install. - - The `:latest` tag is optional, and is used as the default if omitted. - required: true - type: "string" - - name: "name" - in: "query" - description: | - Local name for the pulled plugin. - - The `:latest` tag is optional, and is used as the default if omitted. - required: false - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" - type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - description: "Describes a permission accepted by the user upon installing the plugin." 
- type: "object" - properties: - Name: - type: "string" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - tags: ["Plugin"] - /plugins/{name}/json: - get: - summary: "Inspect a plugin" - operationId: "PluginInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Plugin" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - tags: ["Plugin"] - /plugins/{name}: - delete: - summary: "Remove a plugin" - operationId: "PluginDelete" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Plugin" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - - name: "force" - in: "query" - description: "Disable the plugin before removing. This may result in issues if the plugin is in use by a container." - type: "boolean" - default: false - tags: ["Plugin"] - /plugins/{name}/enable: - post: - summary: "Enable a plugin" - operationId: "PluginEnable" - responses: - 200: - description: "no error" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - - name: "timeout" - in: "query" - description: "Set the HTTP client timeout (in seconds)" - type: "integer" - default: 0 - tags: ["Plugin"] - /plugins/{name}/disable: - post: - summary: "Disable a plugin" - operationId: "PluginDisable" - responses: - 200: - description: "no error" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - tags: ["Plugin"] - /plugins/{name}/upgrade: - post: - summary: "Upgrade a plugin" - operationId: "PluginUpgrade" - responses: - 204: - description: "no error" - 404: - description: "plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - - name: "remote" - in: "query" - description: | - Remote reference to upgrade to. - - The `:latest` tag is optional, and is used as the default if omitted. 
- required: true - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" - type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - description: "Describes a permission accepted by the user upon installing the plugin." - type: "object" - properties: - Name: - type: "string" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - tags: ["Plugin"] - /plugins/create: - post: - summary: "Create a plugin" - operationId: "PluginCreate" - consumes: - - "application/x-tar" - responses: - 204: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "query" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - - name: "tarContext" - in: "body" - description: "Path to tar containing plugin rootfs and manifest" - schema: - type: "string" - format: "binary" - tags: ["Plugin"] - /plugins/{name}/push: - post: - summary: "Push a plugin" - operationId: "PluginPush" - description: | - Push a plugin to the registry. - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - responses: - 200: - description: "no error" - 404: - description: "plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Plugin"] - /plugins/{name}/set: - post: - summary: "Configure a plugin" - operationId: "PluginSet" - consumes: - - "application/json" - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - type: "string" - example: ["DEBUG=1"] - responses: - 204: - description: "No error" - 404: - description: "Plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Plugin"] - /nodes: - get: - summary: "List nodes" - operationId: "NodeList" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Node" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
- - Available filters: - - `id=` - - `label=` - - `membership=`(`accepted`|`pending`)` - - `name=` - - `role=`(`manager`|`worker`)` - type: "string" - tags: ["Node"] - /nodes/{id}: - get: - summary: "Inspect a node" - operationId: "NodeInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Node" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the node" - type: "string" - required: true - tags: ["Node"] - delete: - summary: "Delete a node" - operationId: "NodeDelete" - responses: - 200: - description: "no error" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the node" - type: "string" - required: true - - name: "force" - in: "query" - description: "Force remove a node from the swarm" - default: false - type: "boolean" - tags: ["Node"] - /nodes/{id}/update: - post: - summary: "Update a node" - operationId: "NodeUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID of the node" - type: "string" - required: true - - name: "body" - in: "body" - schema: - $ref: "#/definitions/NodeSpec" - - name: "version" - in: "query" - description: "The version number of the node object being updated. This is required to avoid conflicting writes." - type: "integer" - format: "int64" - required: true - tags: ["Node"] - /swarm: - get: - summary: "Inspect swarm" - operationId: "SwarmInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Swarm" - 404: - description: "no such swarm" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /swarm/init: - post: - summary: "Initialize a new swarm" - operationId: "SwarmInit" - produces: - - "application/json" - - "text/plain" - responses: - 200: - description: "no error" - schema: - description: "The node ID" - type: "string" - example: "7v2t30z9blmxuhnyo6s4cpenp" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is already part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - properties: - ListenAddr: - description: "Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). 
This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used." - type: "string" - AdvertiseAddr: - description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." - type: "string" - DataPathAddr: - description: | - Address or interface to use for data path traffic (format: ``), for example, `192.168.1.1`, - or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` - is used. - - The `DataPathAddr` specifies the address that global scope network drivers will publish towards other - nodes in order to reach the containers running on this node. Using this parameter it is possible to - separate the container data traffic from the management traffic of the cluster. - type: "string" - ForceNewCluster: - description: "Force creation of a new swarm." - type: "boolean" - Spec: - $ref: "#/definitions/SwarmSpec" - example: - ListenAddr: "0.0.0.0:2377" - AdvertiseAddr: "192.168.1.1:2377" - ForceNewCluster: false - Spec: - Orchestration: {} - Raft: {} - Dispatcher: {} - CAConfig: {} - EncryptionConfig: - AutoLockManagers: false - tags: ["Swarm"] - /swarm/join: - post: - summary: "Join an existing swarm" - operationId: "SwarmJoin" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is already part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - properties: - ListenAddr: - description: "Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP)." - type: "string" - AdvertiseAddr: - description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." - type: "string" - DataPathAddr: - description: | - Address or interface to use for data path traffic (format: ``), for example, `192.168.1.1`, - or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` - is used. - - The `DataPathAddr` specifies the address that global scope network drivers will publish towards other - nodes in order to reach the containers running on this node. Using this parameter it is possible to - separate the container data traffic from the management traffic of the cluster. - - type: "string" - RemoteAddrs: - description: "Addresses of manager nodes already participating in the swarm." - type: "string" - JoinToken: - description: "Secret token for joining this swarm." 
- type: "string" - example: - ListenAddr: "0.0.0.0:2377" - AdvertiseAddr: "192.168.1.1:2377" - RemoteAddrs: - - "node1:2377" - JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" - tags: ["Swarm"] - /swarm/leave: - post: - summary: "Leave a swarm" - operationId: "SwarmLeave" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "force" - description: "Force leave swarm, even if this is the last manager or that it will break the cluster." - in: "query" - type: "boolean" - default: false - tags: ["Swarm"] - /swarm/update: - post: - summary: "Update a swarm" - operationId: "SwarmUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - $ref: "#/definitions/SwarmSpec" - - name: "version" - in: "query" - description: "The version number of the swarm object being updated. This is required to avoid conflicting writes." - type: "integer" - format: "int64" - required: true - - name: "rotateWorkerToken" - in: "query" - description: "Rotate the worker join token." - type: "boolean" - default: false - - name: "rotateManagerToken" - in: "query" - description: "Rotate the manager join token." - type: "boolean" - default: false - - name: "rotateManagerUnlockKey" - in: "query" - description: "Rotate the manager unlock key." - type: "boolean" - default: false - tags: ["Swarm"] - /swarm/unlockkey: - get: - summary: "Get the unlock key" - operationId: "SwarmUnlockkey" - consumes: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "UnlockKeyResponse" - properties: - UnlockKey: - description: "The swarm's unlock key." - type: "string" - example: - UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /swarm/unlock: - post: - summary: "Unlock a locked manager" - operationId: "SwarmUnlock" - consumes: - - "application/json" - produces: - - "application/json" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - properties: - UnlockKey: - description: "The swarm's unlock key." 
- type: "string" - example: - UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /services: - get: - summary: "List services" - operationId: "ServiceList" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Service" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters: - - - `id=` - - `label=` - - `mode=["replicated"|"global"]` - - `name=` - tags: ["Service"] - /services/create: - post: - summary: "Create a service" - operationId: "ServiceCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - type: "object" - title: "ServiceCreateResponse" - properties: - ID: - description: "The ID of the created service." - type: "string" - Warning: - description: "Optional warning message" - type: "string" - example: - ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" - Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 403: - description: "network is not eligible for services" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "name conflicts with an existing service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/ServiceSpec" - - type: "object" - example: - Name: "web" - TaskTemplate: - ContainerSpec: - Image: "nginx:alpine" - Mounts: - - - ReadOnly: true - Source: "web-data" - Target: "/usr/share/nginx/html" - Type: "volume" - VolumeOptions: - DriverConfig: {} - Labels: - com.example.something: "something-value" - Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] - User: "33" - DNSConfig: - Nameservers: ["8.8.8.8"] - Search: ["example.org"] - Options: ["timeout:3"] - Secrets: - - - File: - Name: "www.example.org.key" - UID: "33" - GID: "33" - Mode: 384 - SecretID: "fpjqlhnwb19zds35k8wn80lq9" - SecretName: "example_org_domain_key" - LogDriver: - Name: "json-file" - Options: - max-file: "3" - max-size: "10M" - Placement: {} - Resources: - Limits: - MemoryBytes: 104857600 - Reservations: {} - RestartPolicy: - Condition: "on-failure" - Delay: 10000000000 - MaxAttempts: 10 - Mode: - Replicated: - Replicas: 4 - UpdateConfig: - Parallelism: 2 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Ports: - - - Protocol: "tcp" - PublishedPort: 8080 - TargetPort: 80 - Labels: - foo: "bar" - - name: "X-Registry-Auth" - in: "header" - description: "A base64-encoded 
auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" - type: "string" - tags: ["Service"] - /services/{id}: - get: - summary: "Inspect a service" - operationId: "ServiceInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Service" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." - required: true - type: "string" - - name: "insertDefaults" - in: "query" - description: "Fill empty fields with default values." - type: "boolean" - default: false - tags: ["Service"] - delete: - summary: "Delete a service" - operationId: "ServiceDelete" - responses: - 200: - description: "no error" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." - required: true - type: "string" - tags: ["Service"] - /services/{id}/update: - post: - summary: "Update a service" - operationId: "ServiceUpdate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/ServiceUpdateResponse" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." - required: true - type: "string" - - name: "body" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/ServiceSpec" - - type: "object" - example: - Name: "top" - TaskTemplate: - ContainerSpec: - Image: "busybox" - Args: - - "top" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ForceUpdate: 0 - Mode: - Replicated: - Replicas: 1 - UpdateConfig: - Parallelism: 2 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Mode: "vip" - - - name: "version" - in: "query" - description: "The version number of the service object being updated. This is required to avoid conflicting writes." - required: true - type: "integer" - - name: "registryAuthFrom" - in: "query" - type: "string" - description: "If the X-Registry-Auth header is not specified, this - parameter indicates where to find registry authorization credentials. The - valid values are `spec` and `previous-spec`." - default: "spec" - - name: "rollback" - in: "query" - type: "string" - description: "Set to this parameter to `previous` to cause a - server-side rollback to the previous service spec. The supplied spec will be - ignored in this case." 
- - name: "X-Registry-Auth" - in: "header" - description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" - type: "string" - - tags: ["Service"] - /services/{id}/logs: - get: - summary: "Get service logs" - description: | - Get `stdout` and `stderr` logs from a service. - - **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers. - operationId: "ServiceLogs" - produces: - - "application/vnd.docker.raw-stream" - - "application/json" - responses: - 101: - description: "logs returned as a stream" - schema: - type: "string" - format: "binary" - 200: - description: "logs returned as a string in response body" - schema: - type: "string" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such service: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the service" - type: "string" - - name: "details" - in: "query" - description: "Show service context and extra details provided to logs." - type: "boolean" - default: false - - name: "follow" - in: "query" - description: | - Return the logs as a stream. - - This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." 
- type: "string" - default: "all" - tags: ["Service"] - /tasks: - get: - summary: "List tasks" - operationId: "TaskList" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Task" - example: - - ID: "0kzzo1i0y4jz6027t0k7aezc7" - Version: - Index: 71 - CreatedAt: "2016-06-07T21:07:31.171892745Z" - UpdatedAt: "2016-06-07T21:07:31.376370513Z" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: "60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:31.290032978Z" - State: "running" - Message: "started" - ContainerStatus: - ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" - PID: 677 - DesiredState: "running" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.10/16" - - ID: "1yljwbmlr8er2waf8orvqpwms" - Version: - Index: 30 - CreatedAt: "2016-06-07T21:07:30.019104782Z" - UpdatedAt: "2016-06-07T21:07:30.231958098Z" - Name: "hopeful_cori" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: "60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:30.202183143Z" - State: "shutdown" - Message: "shutdown" - ContainerStatus: - ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" - DesiredState: "shutdown" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.5/16" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. 
Available filters: - - - `desired-state=(running | shutdown | accepted)` - - `id=` - - `label=key` or `label="key=value"` - - `name=` - - `node=` - - `service=` - tags: ["Task"] - /tasks/{id}: - get: - summary: "Inspect a task" - operationId: "TaskInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Task" - 404: - description: "no such task" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID of the task" - required: true - type: "string" - tags: ["Task"] - /tasks/{id}/logs: - get: - summary: "Get task logs" - description: | - Get `stdout` and `stderr` logs from a task. - - **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers. - operationId: "TaskLogs" - produces: - - "application/vnd.docker.raw-stream" - - "application/json" - responses: - 101: - description: "logs returned as a stream" - schema: - type: "string" - format: "binary" - 200: - description: "logs returned as a string in response body" - schema: - type: "string" - 404: - description: "no such task" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such task: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID of the task" - type: "string" - - name: "details" - in: "query" - description: "Show task context and extra details provided to logs." - type: "boolean" - default: false - - name: "follow" - in: "query" - description: | - Return the logs as a stream. - - This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." 
- type: "string" - default: "all" - /secrets: - get: - summary: "List secrets" - operationId: "SecretList" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Secret" - example: - - ID: "blt1owaxmitz71s9v5zh81zun" - Version: - Index: 85 - CreatedAt: "2017-07-20T13:55:28.678958722Z" - UpdatedAt: "2017-07-20T13:55:28.678958722Z" - Spec: - Name: "mysql-passwd" - Labels: - some.label: "some.value" - Driver: - Name: "secret-bucket" - Options: - OptionA: "value for driver option A" - OptionB: "value for driver option B" - - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "app-dev.crt" - Labels: - foo: "bar" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. Available filters: - - - `id=` - - `label= or label==value` - - `name=` - - `names=` - tags: ["Secret"] - /secrets/create: - post: - summary: "Create a secret" - operationId: "SecretCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 409: - description: "name conflicts with an existing object" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - schema: - allOf: - - $ref: "#/definitions/SecretSpec" - - type: "object" - example: - Name: "app-key.crt" - Labels: - foo: "bar" - Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" - Driver: - Name: "secret-bucket" - Options: - OptionA: "value for driver option A" - OptionB: "value for driver option B" - tags: ["Secret"] - /secrets/{id}: - get: - summary: "Inspect a secret" - operationId: "SecretInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Secret" - examples: - application/json: - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "app-dev.crt" - Labels: - foo: "bar" - Driver: - Name: "secret-bucket" - Options: - OptionA: "value for driver option A" - OptionB: "value for driver option B" - - 404: - description: "secret not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the secret" - tags: ["Secret"] - delete: - summary: "Delete a secret" - operationId: "SecretDelete" - produces: - - "application/json" - responses: - 204: - description: "no error" - 404: - description: "secret not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - 
$ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the secret" - tags: ["Secret"] - /secrets/{id}/update: - post: - summary: "Update a Secret" - operationId: "SecretUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such secret" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the secret" - type: "string" - required: true - - name: "body" - in: "body" - schema: - $ref: "#/definitions/SecretSpec" - description: "The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values." - - name: "version" - in: "query" - description: "The version number of the secret object being updated. This is required to avoid conflicting writes." - type: "integer" - format: "int64" - required: true - tags: ["Secret"] - /configs: - get: - summary: "List configs" - operationId: "ConfigList" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Config" - example: - - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "server.conf" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the configs list. 
Available filters: - - - `id=` - - `label= or label==value` - - `name=` - - `names=` - tags: ["Config"] - /configs/create: - post: - summary: "Create a config" - operationId: "ConfigCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 409: - description: "name conflicts with an existing object" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - schema: - allOf: - - $ref: "#/definitions/ConfigSpec" - - type: "object" - example: - Name: "server.conf" - Labels: - foo: "bar" - Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" - tags: ["Config"] - /configs/{id}: - get: - summary: "Inspect a config" - operationId: "ConfigInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Config" - examples: - application/json: - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "app-dev.crt" - 404: - description: "config not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the config" - tags: ["Config"] - delete: - summary: "Delete a config" - operationId: "ConfigDelete" - produces: - - "application/json" - responses: - 204: - description: "no error" - 404: - description: "config not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the config" - tags: ["Config"] - /configs/{id}/update: - post: - summary: "Update a Config" - operationId: "ConfigUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such config" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the config" - type: "string" - required: true - - name: "body" - in: "body" - schema: - $ref: "#/definitions/ConfigSpec" - description: "The spec of the config to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [ConfigInspect endpoint](#operation/ConfigInspect) response values." - - name: "version" - in: "query" - description: "The version number of the config object being updated. This is required to avoid conflicting writes." 
- type: "integer" - format: "int64" - required: true - tags: ["Config"] - /distribution/{name}/json: - get: - summary: "Get image information from the registry" - description: "Return image digest and platform information by contacting the registry." - operationId: "DistributionInspect" - produces: - - "application/json" - responses: - 200: - description: "descriptor and platform information" - schema: - type: "object" - x-go-name: DistributionInspect - title: "DistributionInspectResponse" - required: [Descriptor, Platforms] - properties: - Descriptor: - type: "object" - description: "A descriptor struct containing digest, media type, and size" - properties: - MediaType: - type: "string" - Size: - type: "integer" - format: "int64" - Digest: - type: "string" - URLs: - type: "array" - items: - type: "string" - Platforms: - type: "array" - description: "An array containing all platforms supported by the image" - items: - type: "object" - properties: - Architecture: - type: "string" - OS: - type: "string" - OSVersion: - type: "string" - OSFeatures: - type: "array" - items: - type: "string" - Variant: - type: "string" - Features: - type: "array" - items: - type: "string" - examples: - application/json: - Descriptor: - MediaType: "application/vnd.docker.distribution.manifest.v2+json" - Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" - Size: 3987495 - URLs: - - "" - Platforms: - - Architecture: "amd64" - OS: "linux" - OSVersion: "" - OSFeatures: - - "" - Variant: "" - Features: - - "" - 401: - description: "Failed authentication or no image found" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such image: someimage (tag: latest)" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or id" - type: "string" - required: true - tags: ["Distribution"] - /session: - post: - summary: "Initialize interactive session" - description: | - Start a new interactive session with a server. Session allows server to call back to the client for advanced capabilities. - - > **Note**: This endpoint is *experimental* and only available if the daemon is started with experimental - > features enabled. The specifications for this endpoint may still change in a future version of the API. - - ### Hijacking - - This endpoint hijacks the HTTP connection to HTTP2 transport that allows the client to expose gPRC services on that connection. 
- - For example, the client sends this request to upgrade the connection: - - ``` - POST /session HTTP/1.1 - Upgrade: h2c - Connection: Upgrade - ``` - - The Docker daemon will respond with a `101 UPGRADED` response followed by the raw stream: - - ``` - HTTP/1.1 101 UPGRADED - Connection: Upgrade - Upgrade: h2c - ``` - operationId: "Session" - produces: - - "application/vnd.docker.raw-stream" - responses: - 101: - description: "no error, hijacking successful" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Session (experimental)"] diff --git a/vendor/github.com/docker/docker/api/types/BUILD.bazel b/vendor/github.com/docker/docker/api/types/BUILD.bazel deleted file mode 100644 index 531d7c56..00000000 --- a/vendor/github.com/docker/docker/api/types/BUILD.bazel +++ /dev/null @@ -1,39 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "auth.go", - "client.go", - "configs.go", - "error_response.go", - "graph_driver_data.go", - "id_response.go", - "image_delete_response_item.go", - "image_summary.go", - "plugin.go", - "plugin_device.go", - "plugin_env.go", - "plugin_interface_type.go", - "plugin_mount.go", - "plugin_responses.go", - "port.go", - "seccomp.go", - "service_update_response.go", - "stats.go", - "types.go", - "volume.go", - ], - importpath = "github.com/docker/docker/api/types", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/docker/docker/api/types/container:go_default_library", - "//vendor/github.com/docker/docker/api/types/filters:go_default_library", - "//vendor/github.com/docker/docker/api/types/mount:go_default_library", - "//vendor/github.com/docker/docker/api/types/network:go_default_library", - "//vendor/github.com/docker/docker/api/types/registry:go_default_library", - "//vendor/github.com/docker/docker/api/types/swarm:go_default_library", - "//vendor/github.com/docker/go-connections/nat:go_default_library", - "//vendor/github.com/docker/go-units:go_default_library", - ], -) diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/BUILD.bazel b/vendor/github.com/docker/docker/api/types/blkiodev/BUILD.bazel deleted file mode 100644 index cde65042..00000000 --- a/vendor/github.com/docker/docker/api/types/blkiodev/BUILD.bazel +++ /dev/null @@ -1,8 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["blkio.go"], - importpath = "github.com/docker/docker/api/types/blkiodev", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index 18b36d59..3b698c2c 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -181,8 +181,24 @@ type ImageBuildOptions struct { Target string SessionID string Platform string + // Version specifies the version of the underlying builder to use + Version BuilderVersion + // BuildID is an optional identifier that can be passed together with the + // build request. The same identifier can be used to gracefully cancel the + // build with the cancel request.
+ BuildID string } +// BuilderVersion sets the version of underlying builder to use +type BuilderVersion string + +const ( + // BuilderV1 is the first generation builder in docker daemon + BuilderV1 BuilderVersion = "1" + // BuilderBuildKit is builder based on moby/buildkit project + BuilderBuildKit = "2" +) + // ImageBuildResponse holds information // returned by a server after building // an image. diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go index f6537a27..178e911a 100644 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -55,3 +55,10 @@ type PluginEnableConfig struct { type PluginDisableConfig struct { ForceDisable bool } + +// NetworkListConfig stores the options available for listing networks +type NetworkListConfig struct { + // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here + Detailed bool + Verbose bool +} diff --git a/vendor/github.com/docker/docker/api/types/container/BUILD.bazel b/vendor/github.com/docker/docker/api/types/container/BUILD.bazel deleted file mode 100644 index ba2d6c04..00000000 --- a/vendor/github.com/docker/docker/api/types/container/BUILD.bazel +++ /dev/null @@ -1,59 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "config.go", - "container_changes.go", - "container_create.go", - "container_top.go", - "container_update.go", - "container_wait.go", - "host_config.go", - "waitcondition.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "hostconfig_unix.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "hostconfig_unix.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "hostconfig_unix.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "hostconfig_unix.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "hostconfig_unix.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "hostconfig_unix.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "hostconfig_unix.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "hostconfig_unix.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "hostconfig_unix.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "hostconfig_unix.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "hostconfig_windows.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/docker/docker/api/types/container", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/docker/docker/api/types/blkiodev:go_default_library", - "//vendor/github.com/docker/docker/api/types/mount:go_default_library", - "//vendor/github.com/docker/docker/api/types/strslice:go_default_library", - "//vendor/github.com/docker/go-connections/nat:go_default_library", - "//vendor/github.com/docker/go-units:go_default_library", - ], -) diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go index 02271ecd..4ef26fa6 100644 --- a/vendor/github.com/docker/docker/api/types/container/host_config.go +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -401,6 +401,12 @@ type HostConfig struct { // Mounts specs used by the container Mounts []mount.Mount `json:",omitempty"` + // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths) + 
MaskedPaths []string + + // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths) + ReadonlyPaths []string + // Run a custom init inside the container, if null, use the daemon's configured settings Init *bool `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/events/BUILD.bazel b/vendor/github.com/docker/docker/api/types/events/BUILD.bazel deleted file mode 100644 index 17cec756..00000000 --- a/vendor/github.com/docker/docker/api/types/events/BUILD.bazel +++ /dev/null @@ -1,8 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["events.go"], - importpath = "github.com/docker/docker/api/types/events", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/docker/docker/api/types/filters/BUILD.bazel b/vendor/github.com/docker/docker/api/types/filters/BUILD.bazel deleted file mode 100644 index aa037409..00000000 --- a/vendor/github.com/docker/docker/api/types/filters/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = ["parse.go"], - importpath = "github.com/docker/docker/api/types/filters", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/docker/docker/api/types/versions:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = [ - "example_test.go", - "parse_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/docker/docker/api/types/filters", - deps = [ - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert/cmp:go_default_library", - ], -) diff --git a/vendor/github.com/docker/docker/api/types/filters/example_test.go b/vendor/github.com/docker/docker/api/types/filters/example_test.go deleted file mode 100644 index c8fec1b9..00000000 --- a/vendor/github.com/docker/docker/api/types/filters/example_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package filters // import "github.com/docker/docker/api/types/filters" - -func ExampleArgs_MatchKVList() { - args := NewArgs( - Arg("label", "image=foo"), - Arg("label", "state=running")) - - // returns true because there are no values for bogus - args.MatchKVList("bogus", nil) - - // returns false because there are no sources - args.MatchKVList("label", nil) - - // returns true because all sources are matched - args.MatchKVList("label", map[string]string{ - "image": "foo", - "state": "running", - }) - - // returns false because the values do not match - args.MatchKVList("label", map[string]string{ - "image": "other", - }) -} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse_test.go b/vendor/github.com/docker/docker/api/types/filters/parse_test.go deleted file mode 100644 index fbd9ae4f..00000000 --- a/vendor/github.com/docker/docker/api/types/filters/parse_test.go +++ /dev/null @@ -1,423 +0,0 @@ -package filters // import "github.com/docker/docker/api/types/filters" - -import ( - "errors" - "testing" - - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" -) - -func TestParseArgs(t *testing.T) { - // equivalent of `docker ps -f 'created=today' -f 'image.name=ubuntu*' -f 'image.name=*untu'` - flagArgs := []string{ - "created=today", - "image.name=ubuntu*", - "image.name=*untu", - } - var ( - args = NewArgs() - err error - ) - - for i := range flagArgs { - 
args, err = ParseFlag(flagArgs[i], args) - assert.NilError(t, err) - } - assert.Check(t, is.Len(args.Get("created"), 1)) - assert.Check(t, is.Len(args.Get("image.name"), 2)) -} - -func TestParseArgsEdgeCase(t *testing.T) { - var args Args - args, err := ParseFlag("", args) - if err != nil { - t.Fatal(err) - } - if args.Len() != 0 { - t.Fatalf("Expected an empty Args (map), got %v", args) - } - if args, err = ParseFlag("anything", args); err == nil || err != ErrBadFormat { - t.Fatalf("Expected ErrBadFormat, got %v", err) - } -} - -func TestToJSON(t *testing.T) { - fields := map[string]map[string]bool{ - "created": {"today": true}, - "image.name": {"ubuntu*": true, "*untu": true}, - } - a := Args{fields: fields} - - _, err := ToJSON(a) - if err != nil { - t.Errorf("failed to marshal the filters: %s", err) - } -} - -func TestToParamWithVersion(t *testing.T) { - fields := map[string]map[string]bool{ - "created": {"today": true}, - "image.name": {"ubuntu*": true, "*untu": true}, - } - a := Args{fields: fields} - - str1, err := ToParamWithVersion("1.21", a) - if err != nil { - t.Errorf("failed to marshal the filters with version < 1.22: %s", err) - } - str2, err := ToParamWithVersion("1.22", a) - if err != nil { - t.Errorf("failed to marshal the filters with version >= 1.22: %s", err) - } - if str1 != `{"created":["today"],"image.name":["*untu","ubuntu*"]}` && - str1 != `{"created":["today"],"image.name":["ubuntu*","*untu"]}` { - t.Errorf("incorrectly marshaled the filters: %s", str1) - } - if str2 != `{"created":{"today":true},"image.name":{"*untu":true,"ubuntu*":true}}` && - str2 != `{"created":{"today":true},"image.name":{"ubuntu*":true,"*untu":true}}` { - t.Errorf("incorrectly marshaled the filters: %s", str2) - } -} - -func TestFromJSON(t *testing.T) { - invalids := []string{ - "anything", - "['a','list']", - "{'key': 'value'}", - `{"key": "value"}`, - } - valid := map[*Args][]string{ - {fields: map[string]map[string]bool{"key": {"value": true}}}: { - `{"key": ["value"]}`, - `{"key": {"value": true}}`, - }, - {fields: map[string]map[string]bool{"key": {"value1": true, "value2": true}}}: { - `{"key": ["value1", "value2"]}`, - `{"key": {"value1": true, "value2": true}}`, - }, - {fields: map[string]map[string]bool{"key1": {"value1": true}, "key2": {"value2": true}}}: { - `{"key1": ["value1"], "key2": ["value2"]}`, - `{"key1": {"value1": true}, "key2": {"value2": true}}`, - }, - } - - for _, invalid := range invalids { - if _, err := FromJSON(invalid); err == nil { - t.Fatalf("Expected an error with %v, got nothing", invalid) - } - } - - for expectedArgs, matchers := range valid { - for _, json := range matchers { - args, err := FromJSON(json) - if err != nil { - t.Fatal(err) - } - if args.Len() != expectedArgs.Len() { - t.Fatalf("Expected %v, go %v", expectedArgs, args) - } - for key, expectedValues := range expectedArgs.fields { - values := args.Get(key) - - if len(values) != len(expectedValues) { - t.Fatalf("Expected %v, go %v", expectedArgs, args) - } - - for _, v := range values { - if !expectedValues[v] { - t.Fatalf("Expected %v, go %v", expectedArgs, args) - } - } - } - } - } -} - -func TestEmpty(t *testing.T) { - a := Args{} - v, err := ToJSON(a) - if err != nil { - t.Errorf("failed to marshal the filters: %s", err) - } - v1, err := FromJSON(v) - if err != nil { - t.Errorf("%s", err) - } - if a.Len() != v1.Len() { - t.Error("these should both be empty sets") - } -} - -func TestArgsMatchKVListEmptySources(t *testing.T) { - args := NewArgs() - if !args.MatchKVList("created", 
map[string]string{}) { - t.Fatalf("Expected true for (%v,created), got true", args) - } - - args = Args{map[string]map[string]bool{"created": {"today": true}}} - if args.MatchKVList("created", map[string]string{}) { - t.Fatalf("Expected false for (%v,created), got true", args) - } -} - -func TestArgsMatchKVList(t *testing.T) { - // Not empty sources - sources := map[string]string{ - "key1": "value1", - "key2": "value2", - "key3": "value3", - } - - matches := map[*Args]string{ - {}: "field", - {map[string]map[string]bool{ - "created": {"today": true}, - "labels": {"key1": true}}, - }: "labels", - {map[string]map[string]bool{ - "created": {"today": true}, - "labels": {"key1=value1": true}}, - }: "labels", - } - - for args, field := range matches { - if !args.MatchKVList(field, sources) { - t.Fatalf("Expected true for %v on %v, got false", sources, args) - } - } - - differs := map[*Args]string{ - {map[string]map[string]bool{ - "created": {"today": true}}, - }: "created", - {map[string]map[string]bool{ - "created": {"today": true}, - "labels": {"key4": true}}, - }: "labels", - {map[string]map[string]bool{ - "created": {"today": true}, - "labels": {"key1=value3": true}}, - }: "labels", - } - - for args, field := range differs { - if args.MatchKVList(field, sources) { - t.Fatalf("Expected false for %v on %v, got true", sources, args) - } - } -} - -func TestArgsMatch(t *testing.T) { - source := "today" - - matches := map[*Args]string{ - {}: "field", - {map[string]map[string]bool{ - "created": {"today": true}}, - }: "today", - {map[string]map[string]bool{ - "created": {"to*": true}}, - }: "created", - {map[string]map[string]bool{ - "created": {"to(.*)": true}}, - }: "created", - {map[string]map[string]bool{ - "created": {"tod": true}}, - }: "created", - {map[string]map[string]bool{ - "created": {"anything": true, "to*": true}}, - }: "created", - } - - for args, field := range matches { - assert.Check(t, args.Match(field, source), - "Expected field %s to match %s", field, source) - } - - differs := map[*Args]string{ - {map[string]map[string]bool{ - "created": {"tomorrow": true}}, - }: "created", - {map[string]map[string]bool{ - "created": {"to(day": true}}, - }: "created", - {map[string]map[string]bool{ - "created": {"tom(.*)": true}}, - }: "created", - {map[string]map[string]bool{ - "created": {"tom": true}}, - }: "created", - {map[string]map[string]bool{ - "created": {"today1": true}, - "labels": {"today": true}}, - }: "created", - } - - for args, field := range differs { - assert.Check(t, !args.Match(field, source), "Expected field %s to not match %s", field, source) - } -} - -func TestAdd(t *testing.T) { - f := NewArgs() - f.Add("status", "running") - v := f.fields["status"] - if len(v) != 1 || !v["running"] { - t.Fatalf("Expected to include a running status, got %v", v) - } - - f.Add("status", "paused") - if len(v) != 2 || !v["paused"] { - t.Fatalf("Expected to include a paused status, got %v", v) - } -} - -func TestDel(t *testing.T) { - f := NewArgs() - f.Add("status", "running") - f.Del("status", "running") - v := f.fields["status"] - if v["running"] { - t.Fatal("Expected to not include a running status filter, got true") - } -} - -func TestLen(t *testing.T) { - f := NewArgs() - if f.Len() != 0 { - t.Fatal("Expected to not include any field") - } - f.Add("status", "running") - if f.Len() != 1 { - t.Fatal("Expected to include one field") - } -} - -func TestExactMatch(t *testing.T) { - f := NewArgs() - - if !f.ExactMatch("status", "running") { - t.Fatal("Expected to match `running` when there 
are no filters, got false") - } - - f.Add("status", "running") - f.Add("status", "pause*") - - if !f.ExactMatch("status", "running") { - t.Fatal("Expected to match `running` with one of the filters, got false") - } - - if f.ExactMatch("status", "paused") { - t.Fatal("Expected to not match `paused` with one of the filters, got true") - } -} - -func TestOnlyOneExactMatch(t *testing.T) { - f := NewArgs() - - if !f.UniqueExactMatch("status", "running") { - t.Fatal("Expected to match `running` when there are no filters, got false") - } - - f.Add("status", "running") - - if !f.UniqueExactMatch("status", "running") { - t.Fatal("Expected to match `running` with one of the filters, got false") - } - - if f.UniqueExactMatch("status", "paused") { - t.Fatal("Expected to not match `paused` with one of the filters, got true") - } - - f.Add("status", "pause") - if f.UniqueExactMatch("status", "running") { - t.Fatal("Expected to not match only `running` with two filters, got true") - } -} - -func TestContains(t *testing.T) { - f := NewArgs() - if f.Contains("status") { - t.Fatal("Expected to not contain a status key, got true") - } - f.Add("status", "running") - if !f.Contains("status") { - t.Fatal("Expected to contain a status key, got false") - } -} - -func TestInclude(t *testing.T) { - f := NewArgs() - if f.Include("status") { - t.Fatal("Expected to not include a status key, got true") - } - f.Add("status", "running") - if !f.Include("status") { - t.Fatal("Expected to include a status key, got false") - } -} - -func TestValidate(t *testing.T) { - f := NewArgs() - f.Add("status", "running") - - valid := map[string]bool{ - "status": true, - "dangling": true, - } - - if err := f.Validate(valid); err != nil { - t.Fatal(err) - } - - f.Add("bogus", "running") - if err := f.Validate(valid); err == nil { - t.Fatal("Expected to return an error, got nil") - } -} - -func TestWalkValues(t *testing.T) { - f := NewArgs() - f.Add("status", "running") - f.Add("status", "paused") - - f.WalkValues("status", func(value string) error { - if value != "running" && value != "paused" { - t.Fatalf("Unexpected value %s", value) - } - return nil - }) - - err := f.WalkValues("status", func(value string) error { - return errors.New("return") - }) - if err == nil { - t.Fatal("Expected to get an error, got nil") - } - - err = f.WalkValues("foo", func(value string) error { - return errors.New("return") - }) - if err != nil { - t.Fatalf("Expected to not iterate when the field doesn't exist, got %v", err) - } -} - -func TestFuzzyMatch(t *testing.T) { - f := NewArgs() - f.Add("container", "foo") - - cases := map[string]bool{ - "foo": true, - "foobar": true, - "barfoo": false, - "bar": false, - } - for source, match := range cases { - got := f.FuzzyMatch("container", source) - if got != match { - t.Fatalf("Expected %v, got %v: %s", match, got, source) - } - } -} diff --git a/vendor/github.com/docker/docker/api/types/image/BUILD.bazel b/vendor/github.com/docker/docker/api/types/image/BUILD.bazel deleted file mode 100644 index 2500b1e1..00000000 --- a/vendor/github.com/docker/docker/api/types/image/BUILD.bazel +++ /dev/null @@ -1,8 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["image_history.go"], - importpath = "github.com/docker/docker/api/types/image", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/docker/docker/api/types/mount/BUILD.bazel b/vendor/github.com/docker/docker/api/types/mount/BUILD.bazel deleted file mode 100644 index 
df6b54f2..00000000 --- a/vendor/github.com/docker/docker/api/types/mount/BUILD.bazel +++ /dev/null @@ -1,8 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["mount.go"], - importpath = "github.com/docker/docker/api/types/mount", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/docker/docker/api/types/network/BUILD.bazel b/vendor/github.com/docker/docker/api/types/network/BUILD.bazel deleted file mode 100644 index 66a20f42..00000000 --- a/vendor/github.com/docker/docker/api/types/network/BUILD.bazel +++ /dev/null @@ -1,8 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["network.go"], - importpath = "github.com/docker/docker/api/types/network", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go index 761d0b34..ccb448f2 100644 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -1,4 +1,8 @@ package network // import "github.com/docker/docker/api/types/network" +import ( + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/errdefs" +) // Address represents an IP address type Address struct { @@ -106,3 +110,17 @@ type NetworkingConfig struct { type ConfigReference struct { Network string } + +var acceptedFilters = map[string]bool{ + "driver": true, + "type": true, + "name": true, + "id": true, + "label": true, + "scope": true, +} + +// ValidateFilters validates the list of filter args with the available filters. +func ValidateFilters(filter filters.Args) error { + return errdefs.InvalidParameter(filter.Validate(acceptedFilters)) +} diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go index cab333e0..abae48b9 100644 --- a/vendor/github.com/docker/docker/api/types/plugin.go +++ b/vendor/github.com/docker/docker/api/types/plugin.go @@ -121,6 +121,9 @@ type PluginConfigArgs struct { // swagger:model PluginConfigInterface type PluginConfigInterface struct { + // Protocol to use for clients connecting to the plugin. 
+ ProtocolScheme string `json:"ProtocolScheme,omitempty"` + // socket // Required: true Socket string `json:"Socket"` diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go index ad52d46d..d9123474 100644 --- a/vendor/github.com/docker/docker/api/types/port.go +++ b/vendor/github.com/docker/docker/api/types/port.go @@ -7,7 +7,7 @@ package types // swagger:model Port type Port struct { - // IP + // Host IP address that the container's port is mapped to IP string `json:"IP,omitempty"` // Port on the container diff --git a/vendor/github.com/docker/docker/api/types/registry/BUILD.bazel b/vendor/github.com/docker/docker/api/types/registry/BUILD.bazel deleted file mode 100644 index a46f44be..00000000 --- a/vendor/github.com/docker/docker/api/types/registry/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "authenticate.go", - "registry.go", - ], - importpath = "github.com/docker/docker/api/types/registry", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/opencontainers/image-spec/specs-go/v1:go_default_library"], -) diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go index 60175c06..20daebed 100644 --- a/vendor/github.com/docker/docker/api/types/stats.go +++ b/vendor/github.com/docker/docker/api/types/stats.go @@ -120,7 +120,7 @@ type NetworkStats struct { RxBytes uint64 `json:"rx_bytes"` // Packets received. Windows and Linux. RxPackets uint64 `json:"rx_packets"` - // Received errors. Not used on Windows. Note that we dont `omitempty` this + // Received errors. Not used on Windows. Note that we don't `omitempty` this // field as it is expected in the >=v1.21 API stats structure. RxErrors uint64 `json:"rx_errors"` // Incoming packets dropped. Windows and Linux. @@ -129,7 +129,7 @@ type NetworkStats struct { TxBytes uint64 `json:"tx_bytes"` // Packets sent. Windows and Linux. TxPackets uint64 `json:"tx_packets"` - // Sent errors. Not used on Windows. Note that we dont `omitempty` this + // Sent errors. Not used on Windows. Note that we don't `omitempty` this // field as it is expected in the >=v1.21 API stats structure. TxErrors uint64 `json:"tx_errors"` // Outgoing packets dropped. Windows and Linux. 
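For reference, the hunks above add `MaskedPaths`/`ReadonlyPaths` to `container.HostConfig` and `Version`/`BuildID` to `types.ImageBuildOptions`. A minimal sketch of how downstream code might exercise these new fields, assuming the `client.NewClientWithOpts` constructor present at this docker/docker revision; the image name, container name, and build ID are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		log.Fatal(err)
	}

	// MaskedPaths and ReadonlyPaths override the daemon's default set of
	// masked and read-only paths inside the container.
	hostConfig := &container.HostConfig{
		MaskedPaths:   []string{"/proc/kcore"},
		ReadonlyPaths: []string{"/proc/sys"},
	}
	created, err := cli.ContainerCreate(ctx, &container.Config{Image: "busybox"}, hostConfig, nil, "masked-paths-demo")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("created container", created.ID)

	// Version selects the BuildKit-based builder; BuildID labels the build
	// so it can later be cancelled through the new /build/cancel endpoint.
	buildOpts := types.ImageBuildOptions{
		Version: types.BuilderBuildKit,
		BuildID: "example-build-1",
	}
	_ = buildOpts // passed to cli.ImageBuild together with a tar build context
}
```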
diff --git a/vendor/github.com/docker/docker/api/types/strslice/BUILD.bazel b/vendor/github.com/docker/docker/api/types/strslice/BUILD.bazel deleted file mode 100644 index 951e27c9..00000000 --- a/vendor/github.com/docker/docker/api/types/strslice/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = ["strslice.go"], - importpath = "github.com/docker/docker/api/types/strslice", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = ["strslice_test.go"], - embed = [":go_default_library"], - importpath = "github.com/docker/docker/api/types/strslice", -) diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice_test.go b/vendor/github.com/docker/docker/api/types/strslice/strslice_test.go deleted file mode 100644 index a065eb55..00000000 --- a/vendor/github.com/docker/docker/api/types/strslice/strslice_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package strslice // import "github.com/docker/docker/api/types/strslice" - -import ( - "encoding/json" - "reflect" - "testing" -) - -func TestStrSliceMarshalJSON(t *testing.T) { - for _, testcase := range []struct { - input StrSlice - expected string - }{ - // MADNESS(stevvooe): No clue why nil would be "" but empty would be - // "null". Had to make a change here that may affect compatibility. - {input: nil, expected: "null"}, - {StrSlice{}, "[]"}, - {StrSlice{"/bin/sh", "-c", "echo"}, `["/bin/sh","-c","echo"]`}, - } { - data, err := json.Marshal(testcase.input) - if err != nil { - t.Fatal(err) - } - if string(data) != testcase.expected { - t.Fatalf("%#v: expected %v, got %v", testcase.input, testcase.expected, string(data)) - } - } -} - -func TestStrSliceUnmarshalJSON(t *testing.T) { - parts := map[string][]string{ - "": {"default", "values"}, - "[]": {}, - `["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"}, - } - for json, expectedParts := range parts { - strs := StrSlice{"default", "values"} - if err := strs.UnmarshalJSON([]byte(json)); err != nil { - t.Fatal(err) - } - - actualParts := []string(strs) - if !reflect.DeepEqual(actualParts, expectedParts) { - t.Fatalf("%#v: expected %v, got %v", json, expectedParts, actualParts) - } - - } -} - -func TestStrSliceUnmarshalString(t *testing.T) { - var e StrSlice - echo, err := json.Marshal("echo") - if err != nil { - t.Fatal(err) - } - if err := json.Unmarshal(echo, &e); err != nil { - t.Fatal(err) - } - - if len(e) != 1 { - t.Fatalf("expected 1 element after unmarshal: %q", e) - } - - if e[0] != "echo" { - t.Fatalf("expected `echo`, got: %q", e[0]) - } -} - -func TestStrSliceUnmarshalSlice(t *testing.T) { - var e StrSlice - echo, err := json.Marshal([]string{"echo"}) - if err != nil { - t.Fatal(err) - } - if err := json.Unmarshal(echo, &e); err != nil { - t.Fatal(err) - } - - if len(e) != 1 { - t.Fatalf("expected 1 element after unmarshal: %q", e) - } - - if e[0] != "echo" { - t.Fatalf("expected `echo`, got: %q", e[0]) - } -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/BUILD.bazel b/vendor/github.com/docker/docker/api/types/swarm/BUILD.bazel deleted file mode 100644 index b5cf3c2a..00000000 --- a/vendor/github.com/docker/docker/api/types/swarm/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "common.go", - "config.go", - "container.go", - "network.go", - "node.go", - "runtime.go", - "secret.go", - "service.go", - "swarm.go", - 
"task.go", - ], - importpath = "github.com/docker/docker/api/types/swarm", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/docker/docker/api/types/container:go_default_library", - "//vendor/github.com/docker/docker/api/types/mount:go_default_library", - "//vendor/github.com/docker/docker/api/types/network:go_default_library", - "//vendor/github.com/docker/docker/api/types/swarm/runtime:go_default_library", - ], -) diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go index 0041653c..e12f0983 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -55,6 +55,7 @@ type ContainerSpec struct { User string `json:",omitempty"` Groups []string `json:",omitempty"` Privileges *Privileges `json:",omitempty"` + Init *bool `json:",omitempty"` StopSignal string `json:",omitempty"` TTY bool `json:",omitempty"` OpenStdin bool `json:",omitempty"` @@ -70,4 +71,5 @@ type ContainerSpec struct { Secrets []*SecretReference `json:",omitempty"` Configs []*ConfigReference `json:",omitempty"` Isolation container.Isolation `json:",omitempty"` + Sysctls map[string]string `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go index 81b5f4cf..0c77403c 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime.go +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime.go @@ -11,9 +11,17 @@ const ( RuntimeContainer RuntimeType = "container" // RuntimePlugin is the plugin based runtime RuntimePlugin RuntimeType = "plugin" + // RuntimeNetworkAttachment is the network attachment runtime + RuntimeNetworkAttachment RuntimeType = "attachment" // RuntimeURLContainer is the proto url for the container type RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" // RuntimeURLPlugin is the proto url for the plugin type RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" ) + +// NetworkAttachmentSpec represents the runtime spec type for network +// attachment tasks +type NetworkAttachmentSpec struct { + ContainerID string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/BUILD.bazel b/vendor/github.com/docker/docker/api/types/swarm/runtime/BUILD.bazel deleted file mode 100644 index 0688fd1a..00000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") - -proto_library( - name = "runtime_proto", - srcs = ["plugin.proto"], - visibility = ["//visibility:public"], -) - -go_proto_library( - name = "runtime_go_proto", - importpath = "github.com/docker/docker/api/types/swarm/runtime", - proto = ":runtime_proto", - visibility = ["//visibility:public"], -) - -go_library( - name = "go_default_library", - srcs = ["gen.go"], - embed = [":runtime_go_proto"], - importpath = "github.com/docker/docker/api/types/swarm/runtime", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto deleted file mode 100644 index 6d63b778..00000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -option go_package = 
"github.com/docker/docker/api/types/swarm/runtime;runtime"; - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. -message PluginSpec { - string name = 1; - string remote = 2; - repeated PluginPrivilege privileges = 3; - bool disabled = 4; -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -message PluginPrivilege { - string name = 1; - string description = 2; - repeated string value = 3; -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go index 1b111d72..b742cf1b 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -1,6 +1,8 @@ package swarm // import "github.com/docker/docker/api/types/swarm" -import "time" +import ( + "time" +) // ClusterInfo represents info about the cluster for outputting in "info" // it contains the same information as "Swarm", but without the JoinTokens @@ -10,6 +12,8 @@ type ClusterInfo struct { Spec Spec TLSInfo TLSInfo RootRotationInProgress bool + DefaultAddrPool []string + SubnetSize uint32 } // Swarm represents a swarm. @@ -153,6 +157,8 @@ type InitRequest struct { Spec Spec AutoLockManagers bool Availability NodeAvailability + DefaultAddrPool []string + SubnetSize uint32 } // JoinRequest is the request used to join a swarm. diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go index 5c2bc492..b35605d1 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -60,10 +60,13 @@ type Task struct { // TaskSpec represents the spec of a task. type TaskSpec struct { - // ContainerSpec and PluginSpec are mutually exclusive. - // PluginSpec will only be used when the `Runtime` field is set to `plugin` - ContainerSpec *ContainerSpec `json:",omitempty"` - PluginSpec *runtime.PluginSpec `json:",omitempty"` + // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. + // PluginSpec is only used when the `Runtime` field is set to `plugin` + // NetworkAttachmentSpec is used if the `Runtime` field is set to + // `attachment`. 
+ ContainerSpec *ContainerSpec `json:",omitempty"` + PluginSpec *runtime.PluginSpec `json:",omitempty"` + NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` Resources *ResourceRequirements `json:",omitempty"` RestartPolicy *RestartPolicy `json:",omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/time/BUILD.bazel b/vendor/github.com/docker/docker/api/types/time/BUILD.bazel deleted file mode 100644 index 0adcbd88..00000000 --- a/vendor/github.com/docker/docker/api/types/time/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "duration_convert.go", - "timestamp.go", - ], - importpath = "github.com/docker/docker/api/types/time", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = [ - "duration_convert_test.go", - "timestamp_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/docker/docker/api/types/time", -) diff --git a/vendor/github.com/docker/docker/api/types/time/duration_convert_test.go b/vendor/github.com/docker/docker/api/types/time/duration_convert_test.go deleted file mode 100644 index a23be947..00000000 --- a/vendor/github.com/docker/docker/api/types/time/duration_convert_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package time // import "github.com/docker/docker/api/types/time" - -import ( - "testing" - "time" -) - -func TestDurationToSecondsString(t *testing.T) { - cases := []struct { - in time.Duration - expected string - }{ - {0 * time.Second, "0"}, - {1 * time.Second, "1"}, - {1 * time.Minute, "60"}, - {24 * time.Hour, "86400"}, - } - - for _, c := range cases { - s := DurationToSecondsString(c.in) - if s != c.expected { - t.Errorf("wrong value for input `%v`: expected `%s`, got `%s`", c.in, c.expected, s) - t.Fail() - } - } -} diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go index 8d573acc..ea3495ef 100644 --- a/vendor/github.com/docker/docker/api/types/time/timestamp.go +++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go @@ -82,11 +82,14 @@ func GetTimestamp(value string, reference time.Time) (string, error) { } if err != nil { - // if there is a `-` then it's an RFC3339 like timestamp otherwise assume unixtimestamp + // if there is a `-` then it's an RFC3339 like timestamp if strings.Contains(value, "-") { return "", err // was probably an RFC3339 like timestamp but the parser failed with an error } - return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) + if _, _, err := parseTimestamp(value); err != nil { + return "", fmt.Errorf("failed to parse value as time or duration: %q", value) + } + return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) } return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil @@ -104,6 +107,10 @@ func ParseTimestamps(value string, def int64) (int64, int64, error) { if value == "" { return def, 0, nil } + return parseTimestamp(value) +} + +func parseTimestamp(value string) (int64, int64, error) { sa := strings.SplitN(value, ".", 2) s, err := strconv.ParseInt(sa[0], 10, 64) if err != nil { diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp_test.go b/vendor/github.com/docker/docker/api/types/time/timestamp_test.go 
deleted file mode 100644 index 1a8b20b6..00000000 --- a/vendor/github.com/docker/docker/api/types/time/timestamp_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package time // import "github.com/docker/docker/api/types/time" - -import ( - "fmt" - "testing" - "time" -) - -func TestGetTimestamp(t *testing.T) { - now := time.Now().In(time.UTC) - cases := []struct { - in, expected string - expectedErr bool - }{ - // Partial RFC3339 strings get parsed with second precision - {"2006-01-02T15:04:05.999999999+07:00", "1136189045.999999999", false}, - {"2006-01-02T15:04:05.999999999Z", "1136214245.999999999", false}, - {"2006-01-02T15:04:05.999999999", "1136214245.999999999", false}, - {"2006-01-02T15:04:05Z", "1136214245.000000000", false}, - {"2006-01-02T15:04:05", "1136214245.000000000", false}, - {"2006-01-02T15:04:0Z", "", true}, - {"2006-01-02T15:04:0", "", true}, - {"2006-01-02T15:04Z", "1136214240.000000000", false}, - {"2006-01-02T15:04+00:00", "1136214240.000000000", false}, - {"2006-01-02T15:04-00:00", "1136214240.000000000", false}, - {"2006-01-02T15:04", "1136214240.000000000", false}, - {"2006-01-02T15:0Z", "", true}, - {"2006-01-02T15:0", "", true}, - {"2006-01-02T15Z", "1136214000.000000000", false}, - {"2006-01-02T15+00:00", "1136214000.000000000", false}, - {"2006-01-02T15-00:00", "1136214000.000000000", false}, - {"2006-01-02T15", "1136214000.000000000", false}, - {"2006-01-02T1Z", "1136163600.000000000", false}, - {"2006-01-02T1", "1136163600.000000000", false}, - {"2006-01-02TZ", "", true}, - {"2006-01-02T", "", true}, - {"2006-01-02+00:00", "1136160000.000000000", false}, - {"2006-01-02-00:00", "1136160000.000000000", false}, - {"2006-01-02-00:01", "1136160060.000000000", false}, - {"2006-01-02Z", "1136160000.000000000", false}, - {"2006-01-02", "1136160000.000000000", false}, - {"2015-05-13T20:39:09Z", "1431549549.000000000", false}, - - // unix timestamps returned as is - {"1136073600", "1136073600", false}, - {"1136073600.000000001", "1136073600.000000001", false}, - // Durations - {"1m", fmt.Sprintf("%d", now.Add(-1*time.Minute).Unix()), false}, - {"1.5h", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false}, - {"1h30m", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false}, - - // String fallback - {"invalid", "invalid", false}, - } - - for _, c := range cases { - o, err := GetTimestamp(c.in, now) - if o != c.expected || - (err == nil && c.expectedErr) || - (err != nil && !c.expectedErr) { - t.Errorf("wrong value for '%s'. 
expected:'%s' got:'%s' with error: `%s`", c.in, c.expected, o, err) - t.Fail() - } - } -} - -func TestParseTimestamps(t *testing.T) { - cases := []struct { - in string - def, expectedS, expectedN int64 - expectedErr bool - }{ - // unix timestamps - {"1136073600", 0, 1136073600, 0, false}, - {"1136073600.000000001", 0, 1136073600, 1, false}, - {"1136073600.0000000010", 0, 1136073600, 1, false}, - {"1136073600.00000001", 0, 1136073600, 10, false}, - {"foo.bar", 0, 0, 0, true}, - {"1136073600.bar", 0, 1136073600, 0, true}, - {"", -1, -1, 0, false}, - } - - for _, c := range cases { - s, n, err := ParseTimestamps(c.in, c.def) - if s != c.expectedS || - n != c.expectedN || - (err == nil && c.expectedErr) || - (err != nil && !c.expectedErr) { - t.Errorf("wrong values for input `%s` with default `%d` expected:'%d'seconds and `%d`nanosecond got:'%d'seconds and `%d`nanoseconds with error: `%s`", c.in, c.def, c.expectedS, c.expectedN, s, n, err) - t.Fail() - } - } -} diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index 729f4eb6..a8fae3ba 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -102,9 +102,10 @@ type ContainerStats struct { // Ping contains response of Engine API: // GET "/_ping" type Ping struct { - APIVersion string - OSType string - Experimental bool + APIVersion string + OSType string + Experimental bool + BuilderVersion BuilderVersion } // ComponentVersion describes the version information for a specific component. @@ -204,6 +205,8 @@ type Info struct { RuncCommit Commit InitCommit Commit SecurityOptions []string + ProductLicense string `json:",omitempty"` + Warnings []string } // KeyValue holds a key/value pair @@ -512,7 +515,8 @@ type DiskUsage struct { Images []*ImageSummary Containers []*Container Volumes []*Volume - BuilderSize int64 + BuildCache []*BuildCache + BuilderSize int64 // deprecated } // ContainersPruneReport contains the response for Engine API: @@ -539,6 +543,7 @@ type ImagesPruneReport struct { // BuildCachePruneReport contains the response for Engine API: // POST "/build/prune" type BuildCachePruneReport struct { + CachesDeleted []string SpaceReclaimed uint64 } @@ -585,3 +590,24 @@ type PushResult struct { type BuildResult struct { ID string } + +// BuildCache contains information about a build cache record +type BuildCache struct { + ID string + Parent string + Type string + Description string + InUse bool + Shared bool + Size int64 + CreatedAt time.Time + LastUsedAt *time.Time + UsageCount int +} + +// BuildCachePruneOptions hold parameters to prune the build cache +type BuildCachePruneOptions struct { + All bool + KeepStorage int64 + Filters filters.Args +} diff --git a/vendor/github.com/docker/docker/api/types/versions/BUILD.bazel b/vendor/github.com/docker/docker/api/types/versions/BUILD.bazel deleted file mode 100644 index 1bb58ead..00000000 --- a/vendor/github.com/docker/docker/api/types/versions/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = ["compare.go"], - importpath = "github.com/docker/docker/api/types/versions", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = ["compare_test.go"], - embed = [":go_default_library"], - importpath = "github.com/docker/docker/api/types/versions", -) diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md 
b/vendor/github.com/docker/docker/api/types/versions/README.md deleted file mode 100644 index 1ef911ed..00000000 --- a/vendor/github.com/docker/docker/api/types/versions/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Legacy API type versions - -This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. - -Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. - -## Package name conventions - -The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: - -1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. -2. We cannot use `_` because golint complains about it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`. - -For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`. diff --git a/vendor/github.com/docker/docker/api/types/versions/compare_test.go b/vendor/github.com/docker/docker/api/types/versions/compare_test.go deleted file mode 100644 index 185e37c1..00000000 --- a/vendor/github.com/docker/docker/api/types/versions/compare_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package versions // import "github.com/docker/docker/api/types/versions" - -import ( - "testing" -) - -func assertVersion(t *testing.T, a, b string, result int) { - if r := compare(a, b); r != result { - t.Fatalf("Unexpected version comparison result. 
Found %d, expected %d", r, result) - } -} - -func TestCompareVersion(t *testing.T) { - assertVersion(t, "1.12", "1.12", 0) - assertVersion(t, "1.0.0", "1", 0) - assertVersion(t, "1", "1.0.0", 0) - assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1) - assertVersion(t, "1", "1.0.1", -1) - assertVersion(t, "1.0.1", "1", 1) - assertVersion(t, "1.0.1", "1.0.2", -1) - assertVersion(t, "1.0.2", "1.0.3", -1) - assertVersion(t, "1.0.3", "1.1", -1) - assertVersion(t, "1.1", "1.1.1", -1) - assertVersion(t, "1.1.1", "1.1.2", -1) - assertVersion(t, "1.1.2", "1.2", -1) -} diff --git a/vendor/github.com/docker/docker/api/types/volume/BUILD.bazel b/vendor/github.com/docker/docker/api/types/volume/BUILD.bazel deleted file mode 100644 index 460a26d8..00000000 --- a/vendor/github.com/docker/docker/api/types/volume/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "volumes_create.go", - "volumes_list.go", - ], - importpath = "github.com/docker/docker/api/types/volume", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/docker/docker/api/types:go_default_library"], -) diff --git a/vendor/github.com/docker/docker/api/types/volume/volumes_create.go b/vendor/github.com/docker/docker/api/types/volume/volume_create.go similarity index 81% rename from vendor/github.com/docker/docker/api/types/volume/volumes_create.go rename to vendor/github.com/docker/docker/api/types/volume/volume_create.go index b2dd3a41..f12e4861 100644 --- a/vendor/github.com/docker/docker/api/types/volume/volumes_create.go +++ b/vendor/github.com/docker/docker/api/types/volume/volume_create.go @@ -1,4 +1,4 @@ -package volume // import "github.com/docker/docker/api/types/volume" +package volume // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE @@ -7,9 +7,9 @@ package volume // import "github.com/docker/docker/api/types/volume" // See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- -// VolumesCreateBody volumes create body -// swagger:model VolumesCreateBody -type VolumesCreateBody struct { +// VolumeCreateBody Volume configuration +// swagger:model VolumeCreateBody +type VolumeCreateBody struct { // Name of the volume driver to use. 
// Required: true diff --git a/vendor/github.com/docker/docker/api/types/volume/volumes_list.go b/vendor/github.com/docker/docker/api/types/volume/volume_list.go similarity index 74% rename from vendor/github.com/docker/docker/api/types/volume/volumes_list.go rename to vendor/github.com/docker/docker/api/types/volume/volume_list.go index e071ca08..020198f7 100644 --- a/vendor/github.com/docker/docker/api/types/volume/volumes_list.go +++ b/vendor/github.com/docker/docker/api/types/volume/volume_list.go @@ -1,4 +1,4 @@ -package volume // import "github.com/docker/docker/api/types/volume" +package volume // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE @@ -9,9 +9,9 @@ package volume // import "github.com/docker/docker/api/types/volume" import "github.com/docker/docker/api/types" -// VolumesListOKBody volumes list o k body -// swagger:model VolumesListOKBody -type VolumesListOKBody struct { +// VolumeListOKBody Volume list response +// swagger:model VolumeListOKBody +type VolumeListOKBody struct { // List of volumes // Required: true diff --git a/vendor/github.com/docker/docker/cli/BUILD.bazel b/vendor/github.com/docker/docker/cli/BUILD.bazel deleted file mode 100644 index bc9dd801..00000000 --- a/vendor/github.com/docker/docker/cli/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cobra.go", - "error.go", - "required.go", - ], - importpath = "github.com/docker/docker/cli", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/docker/docker/pkg/term:go_default_library", - "//vendor/github.com/pkg/errors:go_default_library", - "//vendor/github.com/spf13/cobra:go_default_library", - ], -) diff --git a/vendor/github.com/docker/docker/cli/cobra.go b/vendor/github.com/docker/docker/cli/cobra.go deleted file mode 100644 index da524303..00000000 --- a/vendor/github.com/docker/docker/cli/cobra.go +++ /dev/null @@ -1,150 +0,0 @@ -package cli // import "github.com/docker/docker/cli" - -import ( - "fmt" - "strings" - - "github.com/docker/docker/pkg/term" - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -// SetupRootCommand sets default usage, help, and error handling for the -// root command. 
-func SetupRootCommand(rootCmd *cobra.Command) { - cobra.AddTemplateFunc("hasSubCommands", hasSubCommands) - cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands) - cobra.AddTemplateFunc("operationSubCommands", operationSubCommands) - cobra.AddTemplateFunc("managementSubCommands", managementSubCommands) - cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages) - - rootCmd.SetUsageTemplate(usageTemplate) - rootCmd.SetHelpTemplate(helpTemplate) - rootCmd.SetFlagErrorFunc(FlagErrorFunc) - rootCmd.SetHelpCommand(helpCommand) - - rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") - rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") -} - -// FlagErrorFunc prints an error message which matches the format of the -// docker/docker/cli error messages -func FlagErrorFunc(cmd *cobra.Command, err error) error { - if err == nil { - return nil - } - - usage := "" - if cmd.HasSubCommands() { - usage = "\n\n" + cmd.UsageString() - } - return StatusError{ - Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage), - StatusCode: 125, - } -} - -var helpCommand = &cobra.Command{ - Use: "help [command]", - Short: "Help about the command", - PersistentPreRun: func(cmd *cobra.Command, args []string) {}, - PersistentPostRun: func(cmd *cobra.Command, args []string) {}, - RunE: func(c *cobra.Command, args []string) error { - cmd, args, e := c.Root().Find(args) - if cmd == nil || e != nil || len(args) > 0 { - return errors.Errorf("unknown help topic: %v", strings.Join(args, " ")) - } - - helpFunc := cmd.HelpFunc() - helpFunc(cmd, args) - return nil - }, -} - -func hasSubCommands(cmd *cobra.Command) bool { - return len(operationSubCommands(cmd)) > 0 -} - -func hasManagementSubCommands(cmd *cobra.Command) bool { - return len(managementSubCommands(cmd)) > 0 -} - -func operationSubCommands(cmd *cobra.Command) []*cobra.Command { - cmds := []*cobra.Command{} - for _, sub := range cmd.Commands() { - if sub.IsAvailableCommand() && !sub.HasSubCommands() { - cmds = append(cmds, sub) - } - } - return cmds -} - -func wrappedFlagUsages(cmd *cobra.Command) string { - width := 80 - if ws, err := term.GetWinsize(0); err == nil { - width = int(ws.Width) - } - return cmd.Flags().FlagUsagesWrapped(width - 1) -} - -func managementSubCommands(cmd *cobra.Command) []*cobra.Command { - cmds := []*cobra.Command{} - for _, sub := range cmd.Commands() { - if sub.IsAvailableCommand() && sub.HasSubCommands() { - cmds = append(cmds, sub) - } - } - return cmds -} - -var usageTemplate = `Usage: - -{{- if not .HasSubCommands}} {{.UseLine}}{{end}} -{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}} - -{{ .Short | trim }} - -{{- if gt .Aliases 0}} - -Aliases: - {{.NameAndAliases}} - -{{- end}} -{{- if .HasExample}} - -Examples: -{{ .Example }} - -{{- end}} -{{- if .HasFlags}} - -Options: -{{ wrappedFlagUsages . | trimRightSpace}} - -{{- end}} -{{- if hasManagementSubCommands . }} - -Management Commands: - -{{- range managementSubCommands . }} - {{rpad .Name .NamePadding }} {{.Short}} -{{- end}} - -{{- end}} -{{- if hasSubCommands .}} - -Commands: - -{{- range operationSubCommands . }} - {{rpad .Name .NamePadding }} {{.Short}} -{{- end}} -{{- end}} - -{{- if .HasSubCommands }} - -Run '{{.CommandPath}} COMMAND --help' for more information on a command. 
-{{- end}} -` - -var helpTemplate = ` -{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` diff --git a/vendor/github.com/docker/docker/cli/error.go b/vendor/github.com/docker/docker/cli/error.go deleted file mode 100644 index ea7c0eb5..00000000 --- a/vendor/github.com/docker/docker/cli/error.go +++ /dev/null @@ -1,33 +0,0 @@ -package cli // import "github.com/docker/docker/cli" - -import ( - "fmt" - "strings" -) - -// Errors is a list of errors. -// Useful in a loop if you don't want to return the error right away and you want to display after the loop, -// all the errors that happened during the loop. -type Errors []error - -func (errList Errors) Error() string { - if len(errList) < 1 { - return "" - } - - out := make([]string, len(errList)) - for i := range errList { - out[i] = errList[i].Error() - } - return strings.Join(out, ", ") -} - -// StatusError reports an unsuccessful exit by a command. -type StatusError struct { - Status string - StatusCode int -} - -func (e StatusError) Error() string { - return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) -} diff --git a/vendor/github.com/docker/docker/cli/required.go b/vendor/github.com/docker/docker/cli/required.go deleted file mode 100644 index e1ff02d2..00000000 --- a/vendor/github.com/docker/docker/cli/required.go +++ /dev/null @@ -1,27 +0,0 @@ -package cli // import "github.com/docker/docker/cli" - -import ( - "strings" - - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -// NoArgs validates args and returns an error if there are any args -func NoArgs(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return nil - } - - if cmd.HasSubCommands() { - return errors.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n")) - } - - return errors.Errorf( - "\"%s\" accepts no argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) -} diff --git a/vendor/github.com/docker/docker/client/BUILD.bazel b/vendor/github.com/docker/docker/client/BUILD.bazel deleted file mode 100644 index f6688432..00000000 --- a/vendor/github.com/docker/docker/client/BUILD.bazel +++ /dev/null @@ -1,285 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "build_prune.go", - "checkpoint_create.go", - "checkpoint_delete.go", - "checkpoint_list.go", - "client.go", - "config_create.go", - "config_inspect.go", - "config_list.go", - "config_remove.go", - "config_update.go", - "container_attach.go", - "container_commit.go", - "container_copy.go", - "container_create.go", - "container_diff.go", - "container_exec.go", - "container_export.go", - "container_inspect.go", - "container_kill.go", - "container_list.go", - "container_logs.go", - "container_pause.go", - "container_prune.go", - "container_remove.go", - "container_rename.go", - "container_resize.go", - "container_restart.go", - "container_start.go", - "container_stats.go", - "container_stop.go", - "container_top.go", - "container_unpause.go", - "container_update.go", - "container_wait.go", - "disk_usage.go", - "distribution_inspect.go", - "errors.go", - "events.go", - "hijack.go", - "image_build.go", - "image_create.go", - "image_history.go", - "image_import.go", - "image_inspect.go", - "image_list.go", - "image_load.go", - "image_prune.go", - "image_pull.go", - "image_push.go", - "image_remove.go", - "image_save.go", - "image_search.go", - "image_tag.go", - "info.go", - "interface.go", - "interface_experimental.go", - 
"interface_stable.go", - "login.go", - "network_connect.go", - "network_create.go", - "network_disconnect.go", - "network_inspect.go", - "network_list.go", - "network_prune.go", - "network_remove.go", - "node_inspect.go", - "node_list.go", - "node_remove.go", - "node_update.go", - "ping.go", - "plugin_create.go", - "plugin_disable.go", - "plugin_enable.go", - "plugin_inspect.go", - "plugin_install.go", - "plugin_list.go", - "plugin_push.go", - "plugin_remove.go", - "plugin_set.go", - "plugin_upgrade.go", - "request.go", - "secret_create.go", - "secret_inspect.go", - "secret_list.go", - "secret_remove.go", - "secret_update.go", - "service_create.go", - "service_inspect.go", - "service_list.go", - "service_logs.go", - "service_remove.go", - "service_update.go", - "session.go", - "swarm_get_unlock_key.go", - "swarm_init.go", - "swarm_inspect.go", - "swarm_join.go", - "swarm_leave.go", - "swarm_unlock.go", - "swarm_update.go", - "task_inspect.go", - "task_list.go", - "task_logs.go", - "tlsconfig_clone.go", - "tlsconfig_clone_go17.go", - "transport.go", - "utils.go", - "version.go", - "volume_create.go", - "volume_inspect.go", - "volume_list.go", - "volume_prune.go", - "volume_remove.go", - ] + select({ - "@io_bazel_rules_go//go/platform:darwin": [ - "client_unix.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "client_unix.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "client_unix.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "client_unix.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "client_windows.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/docker/docker/client", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/docker/distribution/reference:go_default_library", - "//vendor/github.com/docker/docker/api:go_default_library", - "//vendor/github.com/docker/docker/api/types:go_default_library", - "//vendor/github.com/docker/docker/api/types/container:go_default_library", - "//vendor/github.com/docker/docker/api/types/events:go_default_library", - "//vendor/github.com/docker/docker/api/types/filters:go_default_library", - "//vendor/github.com/docker/docker/api/types/image:go_default_library", - "//vendor/github.com/docker/docker/api/types/network:go_default_library", - "//vendor/github.com/docker/docker/api/types/registry:go_default_library", - "//vendor/github.com/docker/docker/api/types/swarm:go_default_library", - "//vendor/github.com/docker/docker/api/types/time:go_default_library", - "//vendor/github.com/docker/docker/api/types/versions:go_default_library", - "//vendor/github.com/docker/docker/api/types/volume:go_default_library", - "//vendor/github.com/docker/go-connections/sockets:go_default_library", - "//vendor/github.com/docker/go-connections/tlsconfig:go_default_library", - "//vendor/github.com/opencontainers/go-digest:go_default_library", - "//vendor/github.com/pkg/errors:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/golang.org/x/net/context/ctxhttp:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "checkpoint_create_test.go", - "checkpoint_delete_test.go", - "checkpoint_list_test.go", - "client_mock_test.go", - "client_test.go", - "config_create_test.go", - "config_inspect_test.go", - "config_list_test.go", - "config_remove_test.go", - "config_update_test.go", - "container_commit_test.go", - "container_copy_test.go", - "container_create_test.go", - "container_diff_test.go", - "container_exec_test.go", - 
"container_export_test.go", - "container_inspect_test.go", - "container_kill_test.go", - "container_list_test.go", - "container_logs_test.go", - "container_pause_test.go", - "container_prune_test.go", - "container_remove_test.go", - "container_rename_test.go", - "container_resize_test.go", - "container_restart_test.go", - "container_start_test.go", - "container_stats_test.go", - "container_stop_test.go", - "container_top_test.go", - "container_unpause_test.go", - "container_update_test.go", - "container_wait_test.go", - "disk_usage_test.go", - "distribution_inspect_test.go", - "events_test.go", - "image_build_test.go", - "image_create_test.go", - "image_history_test.go", - "image_import_test.go", - "image_inspect_test.go", - "image_list_test.go", - "image_load_test.go", - "image_prune_test.go", - "image_pull_test.go", - "image_push_test.go", - "image_remove_test.go", - "image_save_test.go", - "image_search_test.go", - "image_tag_test.go", - "info_test.go", - "network_connect_test.go", - "network_create_test.go", - "network_disconnect_test.go", - "network_inspect_test.go", - "network_list_test.go", - "network_prune_test.go", - "network_remove_test.go", - "node_inspect_test.go", - "node_list_test.go", - "node_remove_test.go", - "node_update_test.go", - "ping_test.go", - "plugin_disable_test.go", - "plugin_enable_test.go", - "plugin_inspect_test.go", - "plugin_list_test.go", - "plugin_push_test.go", - "plugin_remove_test.go", - "plugin_set_test.go", - "request_test.go", - "secret_create_test.go", - "secret_inspect_test.go", - "secret_list_test.go", - "secret_remove_test.go", - "secret_update_test.go", - "service_create_test.go", - "service_inspect_test.go", - "service_list_test.go", - "service_logs_test.go", - "service_remove_test.go", - "service_update_test.go", - "swarm_get_unlock_key_test.go", - "swarm_init_test.go", - "swarm_inspect_test.go", - "swarm_join_test.go", - "swarm_leave_test.go", - "swarm_unlock_test.go", - "swarm_update_test.go", - "task_inspect_test.go", - "task_list_test.go", - "volume_create_test.go", - "volume_inspect_test.go", - "volume_list_test.go", - "volume_remove_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/docker/docker/client", - deps = [ - "//vendor/github.com/docker/docker/api:go_default_library", - "//vendor/github.com/docker/docker/api/types:go_default_library", - "//vendor/github.com/docker/docker/api/types/container:go_default_library", - "//vendor/github.com/docker/docker/api/types/events:go_default_library", - "//vendor/github.com/docker/docker/api/types/filters:go_default_library", - "//vendor/github.com/docker/docker/api/types/image:go_default_library", - "//vendor/github.com/docker/docker/api/types/network:go_default_library", - "//vendor/github.com/docker/docker/api/types/registry:go_default_library", - "//vendor/github.com/docker/docker/api/types/swarm:go_default_library", - "//vendor/github.com/docker/docker/api/types/volume:go_default_library", - "//vendor/github.com/docker/docker/internal/testutil:go_default_library", - "//vendor/github.com/docker/go-units:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert/cmp:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/env:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/skip:go_default_library", - "//vendor/github.com/opencontainers/go-digest:go_default_library", - 
"//vendor/github.com/opencontainers/image-spec/specs-go/v1:go_default_library", - "//vendor/github.com/pkg/errors:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - ], -) diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md deleted file mode 100644 index 059dfb3c..00000000 --- a/vendor/github.com/docker/docker/client/README.md +++ /dev/null @@ -1,35 +0,0 @@ -# Go client for the Docker Engine API - -The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc. - -For example, to list running containers (the equivalent of `docker ps`): - -```go -package main - -import ( - "context" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/client" -) - -func main() { - cli, err := client.NewEnvClient() - if err != nil { - panic(err) - } - - containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) - if err != nil { - panic(err) - } - - for _, container := range containers { - fmt.Printf("%s %s\n", container.ID[:10], container.Image) - } -} -``` - -[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go new file mode 100644 index 00000000..74df4950 --- /dev/null +++ b/vendor/github.com/docker/docker/client/build_cancel.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// BuildCancel requests the daemon to cancel ongoing build request +func (cli *Client) BuildCancel(ctx context.Context, id string) error { + query := url.Values{} + query.Set("id", id) + + serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil) + if err != nil { + return err + } + defer ensureReaderClosed(serverResp) + + return nil +} diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go index 3ad07ccb..42bbf99e 100644 --- a/vendor/github.com/docker/docker/client/build_prune.go +++ b/vendor/github.com/docker/docker/client/build_prune.go @@ -1,22 +1,37 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" + "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "github.com/docker/docker/api/types/filters" + "github.com/pkg/errors" ) // BuildCachePrune requests the daemon to delete unused cache data -func (cli *Client) BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) { +func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) { if err := cli.NewVersionError("1.31", "build prune"); err != nil { return nil, err } report := types.BuildCachePruneReport{} - serverResp, err := cli.post(ctx, "/build/prune", nil, nil, nil) + query := url.Values{} + if opts.All { + query.Set("all", "1") + } + query.Set("keep-storage", fmt.Sprintf("%d", opts.KeepStorage)) + filters, err := filters.ToJSON(opts.Filters) + if err != nil { + return nil, errors.Wrap(err, "prune could not marshal filters option") + } + query.Set("filters", filters) + + serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil) + if err != nil { return nil, err } diff --git 
a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go index 6441ed25..921024fe 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_create.go +++ b/vendor/github.com/docker/docker/client/checkpoint_create.go @@ -1,8 +1,9 @@ package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // CheckpointCreate creates a checkpoint from the given container with the given name diff --git a/vendor/github.com/docker/docker/client/checkpoint_create_test.go b/vendor/github.com/docker/docker/client/checkpoint_create_test.go deleted file mode 100644 index bd3e731e..00000000 --- a/vendor/github.com/docker/docker/client/checkpoint_create_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -func TestCheckpointCreateError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.CheckpointCreate(context.Background(), "nothing", types.CheckpointCreateOptions{ - CheckpointID: "noting", - Exit: true, - }) - - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestCheckpointCreate(t *testing.T) { - expectedContainerID := "container_id" - expectedCheckpointID := "checkpoint_id" - expectedURL := "/containers/container_id/checkpoints" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - - createOptions := &types.CheckpointCreateOptions{} - if err := json.NewDecoder(req.Body).Decode(createOptions); err != nil { - return nil, err - } - - if createOptions.CheckpointID != expectedCheckpointID { - return nil, fmt.Errorf("expected CheckpointID to be 'checkpoint_id', got %v", createOptions.CheckpointID) - } - - if !createOptions.Exit { - return nil, fmt.Errorf("expected Exit to be true") - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.CheckpointCreate(context.Background(), expectedContainerID, types.CheckpointCreateOptions{ - CheckpointID: expectedCheckpointID, - Exit: true, - }) - - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go index 5b97c35c..54f55fa7 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_delete.go +++ b/vendor/github.com/docker/docker/client/checkpoint_delete.go @@ -1,10 +1,10 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // CheckpointDelete deletes the checkpoint with the given name from the given container diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete_test.go b/vendor/github.com/docker/docker/client/checkpoint_delete_test.go deleted file mode 100644 index 6effb276..00000000 --- 
a/vendor/github.com/docker/docker/client/checkpoint_delete_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -func TestCheckpointDeleteError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.CheckpointDelete(context.Background(), "container_id", types.CheckpointDeleteOptions{ - CheckpointID: "checkpoint_id", - }) - - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestCheckpointDelete(t *testing.T) { - expectedURL := "/containers/container_id/checkpoints/checkpoint_id" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "DELETE" { - return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.CheckpointDelete(context.Background(), "container_id", types.CheckpointDeleteOptions{ - CheckpointID: "checkpoint_id", - }) - - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go index 8ba1ddff..2b73fb55 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_list.go +++ b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -1,11 +1,11 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // CheckpointList returns the checkpoints of the given container in the docker host diff --git a/vendor/github.com/docker/docker/client/checkpoint_list_test.go b/vendor/github.com/docker/docker/client/checkpoint_list_test.go deleted file mode 100644 index 1851435c..00000000 --- a/vendor/github.com/docker/docker/client/checkpoint_list_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -func TestCheckpointListError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.CheckpointList(context.Background(), "container_id", types.CheckpointListOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestCheckpointList(t *testing.T) { - expectedURL := "/containers/container_id/checkpoints" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - content, err := json.Marshal([]types.Checkpoint{ - { - Name: "checkpoint", - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: 
ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - checkpoints, err := client.CheckpointList(context.Background(), "container_id", types.CheckpointListOptions{}) - if err != nil { - t.Fatal(err) - } - if len(checkpoints) != 1 { - t.Fatalf("expected 1 checkpoint, got %v", checkpoints) - } -} - -func TestCheckpointListContainerNotFound(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "Server error")), - } - - _, err := client.CheckpointList(context.Background(), "unknown", types.CheckpointListOptions{}) - if err == nil || !IsErrNotFound(err) { - t.Fatalf("expected a containerNotFound error, got %v", err) - } -} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index e129bb20..5031502a 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -42,6 +42,7 @@ For example, to list running containers (the equivalent of "docker ps"): package client // import "github.com/docker/docker/client" import ( + "context" "fmt" "net" "net/http" @@ -57,7 +58,6 @@ import ( "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" "github.com/pkg/errors" - "golang.org/x/net/context" ) // ErrRedirect is the error returned by checkRedirect when the request is non-GET. @@ -173,10 +173,17 @@ func WithTLSClientConfig(cacertPath, certPath, keyPath string) func(*Client) err // WithDialer applies the dialer.DialContext to the client transport. This can be // used to set the Timeout and KeepAlive settings of the client. +// Deprecated: use WithDialContext func WithDialer(dialer *net.Dialer) func(*Client) error { + return WithDialContext(dialer.DialContext) +} + +// WithDialContext applies the dialer to the client transport. This can be +// used to set the Timeout and KeepAlive settings of the client. +func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) func(*Client) error { return func(c *Client) error { if transport, ok := c.client.Transport.(*http.Transport); ok { - transport.DialContext = dialer.DialContext + transport.DialContext = dialContext return nil } return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport) @@ -356,6 +363,11 @@ func (cli *Client) DaemonHost() string { return cli.host } +// HTTPClient returns a copy of the HTTP client bound to the server +func (cli *Client) HTTPClient() *http.Client { + return &*cli.client +} + // ParseHostURL parses a url string, validates the string is a host url, and // returns the parsed URL func ParseHostURL(host string) (*url.URL, error) { @@ -395,3 +407,16 @@ func (cli *Client) CustomHTTPHeaders() map[string]string { func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) { cli.customHTTPHeaders = headers } + +// Dialer returns a dialer for a raw stream connection, with HTTP/1.1 header, that can be used for proxying the daemon connection. +// Used by `docker dial-stdio` (docker/cli#889). 
+func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { + return func(ctx context.Context) (net.Conn, error) { + if transport, ok := cli.client.Transport.(*http.Transport); ok { + if transport.DialContext != nil && transport.TLSClientConfig == nil { + return transport.DialContext(ctx, cli.proto, cli.addr) + } + } + return fallbackDial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) + } +} diff --git a/vendor/github.com/docker/docker/client/client_mock_test.go b/vendor/github.com/docker/docker/client/client_mock_test.go deleted file mode 100644 index 390a1eed..00000000 --- a/vendor/github.com/docker/docker/client/client_mock_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - - "github.com/docker/docker/api/types" -) - -// transportFunc allows us to inject a mock transport for testing. We define it -// here so we can detect the tlsconfig and return nil for only this type. -type transportFunc func(*http.Request) (*http.Response, error) - -func (tf transportFunc) RoundTrip(req *http.Request) (*http.Response, error) { - return tf(req) -} - -func newMockClient(doer func(*http.Request) (*http.Response, error)) *http.Client { - return &http.Client{ - Transport: transportFunc(doer), - } -} - -func errorMock(statusCode int, message string) func(req *http.Request) (*http.Response, error) { - return func(req *http.Request) (*http.Response, error) { - header := http.Header{} - header.Set("Content-Type", "application/json") - - body, err := json.Marshal(&types.ErrorResponse{ - Message: message, - }) - if err != nil { - return nil, err - } - - return &http.Response{ - StatusCode: statusCode, - Body: ioutil.NopCloser(bytes.NewReader(body)), - Header: header, - }, nil - } -} - -func plainTextErrorMock(statusCode int, message string) func(req *http.Request) (*http.Response, error) { - return func(req *http.Request) (*http.Response, error) { - return &http.Response{ - StatusCode: statusCode, - Body: ioutil.NopCloser(bytes.NewReader([]byte(message))), - }, nil - } -} diff --git a/vendor/github.com/docker/docker/client/client_test.go b/vendor/github.com/docker/docker/client/client_test.go deleted file mode 100644 index 7cca04ac..00000000 --- a/vendor/github.com/docker/docker/client/client_test.go +++ /dev/null @@ -1,322 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "net/http" - "net/url" - "os" - "runtime" - "testing" - - "github.com/docker/docker/api" - "github.com/docker/docker/api/types" - "github.com/docker/docker/internal/testutil" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "github.com/gotestyourself/gotestyourself/env" - "github.com/gotestyourself/gotestyourself/skip" -) - -func TestNewEnvClient(t *testing.T) { - skip.If(t, runtime.GOOS == "windows") - - testcases := []struct { - doc string - envs map[string]string - expectedError string - expectedVersion string - }{ - { - doc: "default api version", - envs: map[string]string{}, - expectedVersion: api.DefaultVersion, - }, - { - doc: "invalid cert path", - envs: map[string]string{ - "DOCKER_CERT_PATH": "invalid/path", - }, - expectedError: "Could not load X509 key pair: open invalid/path/cert.pem: no such file or directory", - }, - { - doc: "default api version with cert path", - envs: map[string]string{ - "DOCKER_CERT_PATH": "testdata/", - }, - expectedVersion: api.DefaultVersion, - }, - { - doc: 
"default api version with cert path and tls verify", - envs: map[string]string{ - "DOCKER_CERT_PATH": "testdata/", - "DOCKER_TLS_VERIFY": "1", - }, - expectedVersion: api.DefaultVersion, - }, - { - doc: "default api version with cert path and host", - envs: map[string]string{ - "DOCKER_CERT_PATH": "testdata/", - "DOCKER_HOST": "https://notaunixsocket", - }, - expectedVersion: api.DefaultVersion, - }, - { - doc: "invalid docker host", - envs: map[string]string{ - "DOCKER_HOST": "host", - }, - expectedError: "unable to parse docker host `host`", - }, - { - doc: "invalid docker host, with good format", - envs: map[string]string{ - "DOCKER_HOST": "invalid://url", - }, - expectedVersion: api.DefaultVersion, - }, - { - doc: "override api version", - envs: map[string]string{ - "DOCKER_API_VERSION": "1.22", - }, - expectedVersion: "1.22", - }, - } - - defer env.PatchAll(t, nil)() - for _, c := range testcases { - env.PatchAll(t, c.envs) - apiclient, err := NewEnvClient() - if c.expectedError != "" { - assert.Check(t, is.Error(err, c.expectedError), c.doc) - } else { - assert.Check(t, err, c.doc) - version := apiclient.ClientVersion() - assert.Check(t, is.Equal(c.expectedVersion, version), c.doc) - } - - if c.envs["DOCKER_TLS_VERIFY"] != "" { - // pedantic checking that this is handled correctly - tr := apiclient.client.Transport.(*http.Transport) - assert.Assert(t, tr.TLSClientConfig != nil, c.doc) - assert.Check(t, is.Equal(tr.TLSClientConfig.InsecureSkipVerify, false), c.doc) - } - } -} - -func TestGetAPIPath(t *testing.T) { - testcases := []struct { - version string - path string - query url.Values - expected string - }{ - {"", "/containers/json", nil, "/containers/json"}, - {"", "/containers/json", url.Values{}, "/containers/json"}, - {"", "/containers/json", url.Values{"s": []string{"c"}}, "/containers/json?s=c"}, - {"1.22", "/containers/json", nil, "/v1.22/containers/json"}, - {"1.22", "/containers/json", url.Values{}, "/v1.22/containers/json"}, - {"1.22", "/containers/json", url.Values{"s": []string{"c"}}, "/v1.22/containers/json?s=c"}, - {"v1.22", "/containers/json", nil, "/v1.22/containers/json"}, - {"v1.22", "/containers/json", url.Values{}, "/v1.22/containers/json"}, - {"v1.22", "/containers/json", url.Values{"s": []string{"c"}}, "/v1.22/containers/json?s=c"}, - {"v1.22", "/networks/kiwl$%^", nil, "/v1.22/networks/kiwl$%25%5E"}, - } - - for _, testcase := range testcases { - c := Client{version: testcase.version, basePath: "/"} - actual := c.getAPIPath(testcase.path, testcase.query) - assert.Check(t, is.Equal(actual, testcase.expected)) - } -} - -func TestParseHostURL(t *testing.T) { - testcases := []struct { - host string - expected *url.URL - expectedErr string - }{ - { - host: "", - expectedErr: "unable to parse docker host", - }, - { - host: "foobar", - expectedErr: "unable to parse docker host", - }, - { - host: "foo://bar", - expected: &url.URL{Scheme: "foo", Host: "bar"}, - }, - { - host: "tcp://localhost:2476", - expected: &url.URL{Scheme: "tcp", Host: "localhost:2476"}, - }, - { - host: "tcp://localhost:2476/path", - expected: &url.URL{Scheme: "tcp", Host: "localhost:2476", Path: "/path"}, - }, - } - - for _, testcase := range testcases { - actual, err := ParseHostURL(testcase.host) - if testcase.expectedErr != "" { - testutil.ErrorContains(t, err, testcase.expectedErr) - } - assert.Check(t, is.DeepEqual(testcase.expected, actual)) - } -} - -func TestNewEnvClientSetsDefaultVersion(t *testing.T) { - defer env.PatchAll(t, map[string]string{ - "DOCKER_HOST": "", - 
"DOCKER_API_VERSION": "", - "DOCKER_TLS_VERIFY": "", - "DOCKER_CERT_PATH": "", - })() - - client, err := NewEnvClient() - if err != nil { - t.Fatal(err) - } - assert.Check(t, is.Equal(client.version, api.DefaultVersion)) - - expected := "1.22" - os.Setenv("DOCKER_API_VERSION", expected) - client, err = NewEnvClient() - if err != nil { - t.Fatal(err) - } - assert.Check(t, is.Equal(expected, client.version)) -} - -// TestNegotiateAPIVersionEmpty asserts that client.Client can -// negotiate a compatible APIVersion when omitted -func TestNegotiateAPIVersionEmpty(t *testing.T) { - defer env.PatchAll(t, map[string]string{"DOCKER_API_VERSION": ""}) - - client, err := NewEnvClient() - assert.NilError(t, err) - - ping := types.Ping{ - APIVersion: "", - OSType: "linux", - Experimental: false, - } - - // set our version to something new - client.version = "1.25" - - // if no version from server, expect the earliest - // version before APIVersion was implemented - expected := "1.24" - - // test downgrade - client.NegotiateAPIVersionPing(ping) - assert.Check(t, is.Equal(expected, client.version)) -} - -// TestNegotiateAPIVersion asserts that client.Client can -// negotiate a compatible APIVersion with the server -func TestNegotiateAPIVersion(t *testing.T) { - client, err := NewEnvClient() - assert.NilError(t, err) - - expected := "1.21" - ping := types.Ping{ - APIVersion: expected, - OSType: "linux", - Experimental: false, - } - - // set our version to something new - client.version = "1.22" - - // test downgrade - client.NegotiateAPIVersionPing(ping) - assert.Check(t, is.Equal(expected, client.version)) - - // set the client version to something older, and verify that we keep the - // original setting. - expected = "1.20" - client.version = expected - client.NegotiateAPIVersionPing(ping) - assert.Check(t, is.Equal(expected, client.version)) - -} - -// TestNegotiateAPIVersionOverride asserts that we honor -// the environment variable DOCKER_API_VERSION when negotiating versions -func TestNegotiateAPVersionOverride(t *testing.T) { - expected := "9.99" - defer env.PatchAll(t, map[string]string{"DOCKER_API_VERSION": expected})() - - client, err := NewEnvClient() - assert.NilError(t, err) - - ping := types.Ping{ - APIVersion: "1.24", - OSType: "linux", - Experimental: false, - } - - // test that we honored the env var - client.NegotiateAPIVersionPing(ping) - assert.Check(t, is.Equal(expected, client.version)) -} - -type roundTripFunc func(*http.Request) (*http.Response, error) - -func (rtf roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) { - return rtf(req) -} - -type bytesBufferClose struct { - *bytes.Buffer -} - -func (bbc bytesBufferClose) Close() error { - return nil -} - -func TestClientRedirect(t *testing.T) { - client := &http.Client{ - CheckRedirect: CheckRedirect, - Transport: roundTripFunc(func(req *http.Request) (*http.Response, error) { - if req.URL.String() == "/bla" { - return &http.Response{StatusCode: 404}, nil - } - return &http.Response{ - StatusCode: 301, - Header: map[string][]string{"Location": {"/bla"}}, - Body: bytesBufferClose{bytes.NewBuffer(nil)}, - }, nil - }), - } - - cases := []struct { - httpMethod string - expectedErr *url.Error - statusCode int - }{ - {http.MethodGet, nil, 301}, - {http.MethodPost, &url.Error{Op: "Post", URL: "/bla", Err: ErrRedirect}, 301}, - {http.MethodPut, &url.Error{Op: "Put", URL: "/bla", Err: ErrRedirect}, 301}, - {http.MethodDelete, &url.Error{Op: "Delete", URL: "/bla", Err: ErrRedirect}, 301}, - } - - for _, tc := range cases { - 
req, err := http.NewRequest(tc.httpMethod, "/redirectme", nil) - assert.Check(t, err) - resp, err := client.Do(req) - assert.Check(t, is.Equal(tc.statusCode, resp.StatusCode)) - if tc.expectedErr == nil { - assert.Check(t, is.Nil(err)) - } else { - urlError, ok := err.(*url.Error) - assert.Assert(t, ok, "%T is not *url.Error", err) - assert.Check(t, is.Equal(*tc.expectedErr, *urlError)) - } - } -} diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go index b6d15b70..c8b802ad 100644 --- a/vendor/github.com/docker/docker/client/config_create.go +++ b/vendor/github.com/docker/docker/client/config_create.go @@ -1,11 +1,11 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // ConfigCreate creates a new Config. diff --git a/vendor/github.com/docker/docker/client/config_create_test.go b/vendor/github.com/docker/docker/client/config_create_test.go deleted file mode 100644 index 2ee8f1fd..00000000 --- a/vendor/github.com/docker/docker/client/config_create_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "golang.org/x/net/context" -) - -func TestConfigCreateUnsupported(t *testing.T) { - client := &Client{ - version: "1.29", - client: &http.Client{}, - } - _, err := client.ConfigCreate(context.Background(), swarm.ConfigSpec{}) - assert.Check(t, is.Error(err, `"config create" requires API version 1.30, but the Docker daemon API version is 1.29`)) -} - -func TestConfigCreateError(t *testing.T) { - client := &Client{ - version: "1.30", - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ConfigCreate(context.Background(), swarm.ConfigSpec{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestConfigCreate(t *testing.T) { - expectedURL := "/v1.30/configs/create" - client := &Client{ - version: "1.30", - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - b, err := json.Marshal(types.ConfigCreateResponse{ - ID: "test_config", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusCreated, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - r, err := client.ConfigCreate(context.Background(), swarm.ConfigSpec{}) - if err != nil { - t.Fatal(err) - } - if r.ID != "test_config" { - t.Fatalf("expected `test_config`, got %s", r.ID) - } -} diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go index 7ae1376e..4ac566ad 100644 --- a/vendor/github.com/docker/docker/client/config_inspect.go +++ b/vendor/github.com/docker/docker/client/config_inspect.go @@ -2,11 +2,11 @@ package client // import 
"github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // ConfigInspectWithRaw returns the config information with raw data diff --git a/vendor/github.com/docker/docker/client/config_inspect_test.go b/vendor/github.com/docker/docker/client/config_inspect_test.go deleted file mode 100644 index 9d5af0bf..00000000 --- a/vendor/github.com/docker/docker/client/config_inspect_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types/swarm" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -func TestConfigInspectNotFound(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "Server error")), - } - - _, _, err := client.ConfigInspectWithRaw(context.Background(), "unknown") - if err == nil || !IsErrNotFound(err) { - t.Fatalf("expected a NotFoundError error, got %v", err) - } -} - -func TestConfigInspectWithEmptyID(t *testing.T) { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - return nil, errors.New("should not make request") - }), - } - _, _, err := client.ConfigInspectWithRaw(context.Background(), "") - if !IsErrNotFound(err) { - t.Fatalf("Expected NotFoundError, got %v", err) - } -} - -func TestConfigInspectUnsupported(t *testing.T) { - client := &Client{ - version: "1.29", - client: &http.Client{}, - } - _, _, err := client.ConfigInspectWithRaw(context.Background(), "nothing") - assert.Check(t, is.Error(err, `"config inspect" requires API version 1.30, but the Docker daemon API version is 1.29`)) -} - -func TestConfigInspectError(t *testing.T) { - client := &Client{ - version: "1.30", - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, _, err := client.ConfigInspectWithRaw(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestConfigInspectConfigNotFound(t *testing.T) { - client := &Client{ - version: "1.30", - client: newMockClient(errorMock(http.StatusNotFound, "Server error")), - } - - _, _, err := client.ConfigInspectWithRaw(context.Background(), "unknown") - if err == nil || !IsErrNotFound(err) { - t.Fatalf("expected a configNotFoundError error, got %v", err) - } -} - -func TestConfigInspect(t *testing.T) { - expectedURL := "/v1.30/configs/config_id" - client := &Client{ - version: "1.30", - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) - } - content, err := json.Marshal(swarm.Config{ - ID: "config_id", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - configInspect, _, err := client.ConfigInspectWithRaw(context.Background(), "config_id") - if err != nil { - t.Fatal(err) - } - if configInspect.ID != "config_id" { - t.Fatalf("expected `config_id`, got %s", configInspect.ID) - } -} diff --git a/vendor/github.com/docker/docker/client/config_list.go 
b/vendor/github.com/docker/docker/client/config_list.go index b9d2632f..2b9d5460 100644 --- a/vendor/github.com/docker/docker/client/config_list.go +++ b/vendor/github.com/docker/docker/client/config_list.go @@ -1,13 +1,13 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // ConfigList returns the list of configs. diff --git a/vendor/github.com/docker/docker/client/config_list_test.go b/vendor/github.com/docker/docker/client/config_list_test.go deleted file mode 100644 index 0cd99c50..00000000 --- a/vendor/github.com/docker/docker/client/config_list_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "golang.org/x/net/context" -) - -func TestConfigListUnsupported(t *testing.T) { - client := &Client{ - version: "1.29", - client: &http.Client{}, - } - _, err := client.ConfigList(context.Background(), types.ConfigListOptions{}) - assert.Check(t, is.Error(err, `"config list" requires API version 1.30, but the Docker daemon API version is 1.29`)) -} - -func TestConfigListError(t *testing.T) { - client := &Client{ - version: "1.30", - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.ConfigList(context.Background(), types.ConfigListOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestConfigList(t *testing.T) { - expectedURL := "/v1.30/configs" - - filters := filters.NewArgs() - filters.Add("label", "label1") - filters.Add("label", "label2") - - listCases := []struct { - options types.ConfigListOptions - expectedQueryParams map[string]string - }{ - { - options: types.ConfigListOptions{}, - expectedQueryParams: map[string]string{ - "filters": "", - }, - }, - { - options: types.ConfigListOptions{ - Filters: filters, - }, - expectedQueryParams: map[string]string{ - "filters": `{"label":{"label1":true,"label2":true}}`, - }, - }, - } - for _, listCase := range listCases { - client := &Client{ - version: "1.30", - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - for key, expected := range listCase.expectedQueryParams { - actual := query.Get(key) - if actual != expected { - return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) - } - } - content, err := json.Marshal([]swarm.Config{ - { - ID: "config_id1", - }, - { - ID: "config_id2", - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - configs, err := client.ConfigList(context.Background(), listCase.options) - if err != nil { - t.Fatal(err) - } - if len(configs) != 2 { - t.Fatalf("expected 2 configs, got %v", configs) - } - } -} diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go index 9c8f293f..a96871e9 100644 --- a/vendor/github.com/docker/docker/client/config_remove.go +++ b/vendor/github.com/docker/docker/client/config_remove.go @@ -1,6 +1,6 @@ package client // import "github.com/docker/docker/client" -import "golang.org/x/net/context" +import "context" // ConfigRemove removes a Config. func (cli *Client) ConfigRemove(ctx context.Context, id string) error { diff --git a/vendor/github.com/docker/docker/client/config_remove_test.go b/vendor/github.com/docker/docker/client/config_remove_test.go deleted file mode 100644 index 25a5c4ac..00000000 --- a/vendor/github.com/docker/docker/client/config_remove_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "golang.org/x/net/context" -) - -func TestConfigRemoveUnsupported(t *testing.T) { - client := &Client{ - version: "1.29", - client: &http.Client{}, - } - err := client.ConfigRemove(context.Background(), "config_id") - assert.Check(t, is.Error(err, `"config remove" requires API version 1.30, but the Docker daemon API version is 1.29`)) -} - -func TestConfigRemoveError(t *testing.T) { - client := &Client{ - version: "1.30", - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.ConfigRemove(context.Background(), "config_id") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestConfigRemove(t *testing.T) { - expectedURL := "/v1.30/configs/config_id" - - client := &Client{ - version: "1.30", - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "DELETE" { - return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), - }, nil - }), - } - - err := client.ConfigRemove(context.Background(), "config_id") - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go index 6b24024c..39e59cf8 100644 --- a/vendor/github.com/docker/docker/client/config_update.go +++ b/vendor/github.com/docker/docker/client/config_update.go @@ -1,11 +1,11 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "strconv" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // ConfigUpdate attempts to update a Config diff --git 
a/vendor/github.com/docker/docker/client/config_update_test.go b/vendor/github.com/docker/docker/client/config_update_test.go deleted file mode 100644 index a7eea2f8..00000000 --- a/vendor/github.com/docker/docker/client/config_update_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types/swarm" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "golang.org/x/net/context" -) - -func TestConfigUpdateUnsupported(t *testing.T) { - client := &Client{ - version: "1.29", - client: &http.Client{}, - } - err := client.ConfigUpdate(context.Background(), "config_id", swarm.Version{}, swarm.ConfigSpec{}) - assert.Check(t, is.Error(err, `"config update" requires API version 1.30, but the Docker daemon API version is 1.29`)) -} - -func TestConfigUpdateError(t *testing.T) { - client := &Client{ - version: "1.30", - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.ConfigUpdate(context.Background(), "config_id", swarm.Version{}, swarm.ConfigSpec{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestConfigUpdate(t *testing.T) { - expectedURL := "/v1.30/configs/config_id/update" - - client := &Client{ - version: "1.30", - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), - }, nil - }), - } - - err := client.ConfigUpdate(context.Background(), "config_id", swarm.Version{}, swarm.ConfigSpec{}) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go index 1a2a4319..88ba1ef6 100644 --- a/vendor/github.com/docker/docker/client/container_attach.go +++ b/vendor/github.com/docker/docker/client/container_attach.go @@ -1,10 +1,10 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerAttach attaches a connection to a container in the server. diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go index 95219beb..377a2ea6 100644 --- a/vendor/github.com/docker/docker/client/container_commit.go +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -1,13 +1,13 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "errors" "net/url" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerCommit applies changes into a container and creates a new tagged image. 
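As a consumer-side illustration of the `client.go` hunk earlier in this diff (which adds `WithDialContext`, deprecates `WithDialer`, and introduces the `HTTPClient()` and `Dialer()` helpers), here is a minimal sketch of how a caller might adopt the new option. It assumes the `NewClientWithOpts`/`FromEnv` constructors available at the pinned docker/docker revision; the dialer timeouts are illustrative values, not part of this change.

```go
package main

import (
	"context"
	"fmt"
	"net"
	"time"

	"github.com/docker/docker/client"
)

func main() {
	// Custom dialer whose DialContext is handed to the client transport.
	// The timeout/keep-alive values here are only examples.
	dialer := &net.Dialer{
		Timeout:   5 * time.Second,
		KeepAlive: 30 * time.Second,
	}

	// WithDialContext replaces the now-deprecated WithDialer option.
	cli, err := client.NewClientWithOpts(
		client.FromEnv,
		client.WithDialContext(dialer.DialContext),
	)
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Sanity check that the configured transport can reach the daemon.
	ping, err := cli.Ping(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("API version:", ping.APIVersion)
}
```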
diff --git a/vendor/github.com/docker/docker/client/container_commit_test.go b/vendor/github.com/docker/docker/client/container_commit_test.go deleted file mode 100644 index 918734a8..00000000 --- a/vendor/github.com/docker/docker/client/container_commit_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -func TestContainerCommitError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerCommit(context.Background(), "nothing", types.ContainerCommitOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerCommit(t *testing.T) { - expectedURL := "/commit" - expectedContainerID := "container_id" - specifiedReference := "repository_name:tag" - expectedRepositoryName := "repository_name" - expectedTag := "tag" - expectedComment := "comment" - expectedAuthor := "author" - expectedChanges := []string{"change1", "change2"} - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - containerID := query.Get("container") - if containerID != expectedContainerID { - return nil, fmt.Errorf("container id not set in URL query properly. Expected '%s', got %s", expectedContainerID, containerID) - } - repo := query.Get("repo") - if repo != expectedRepositoryName { - return nil, fmt.Errorf("container repo not set in URL query properly. Expected '%s', got %s", expectedRepositoryName, repo) - } - tag := query.Get("tag") - if tag != expectedTag { - return nil, fmt.Errorf("container tag not set in URL query properly. Expected '%s', got %s'", expectedTag, tag) - } - comment := query.Get("comment") - if comment != expectedComment { - return nil, fmt.Errorf("container comment not set in URL query properly. Expected '%s', got %s'", expectedComment, comment) - } - author := query.Get("author") - if author != expectedAuthor { - return nil, fmt.Errorf("container author not set in URL query properly. Expected '%s', got %s'", expectedAuthor, author) - } - pause := query.Get("pause") - if pause != "0" { - return nil, fmt.Errorf("container pause not set in URL query properly. 
Expected 'true', got %v'", pause) - } - changes := query["changes"] - if len(changes) != len(expectedChanges) { - return nil, fmt.Errorf("expected container changes size to be '%d', got %d", len(expectedChanges), len(changes)) - } - b, err := json.Marshal(types.IDResponse{ - ID: "new_container_id", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - r, err := client.ContainerCommit(context.Background(), expectedContainerID, types.ContainerCommitOptions{ - Reference: specifiedReference, - Comment: expectedComment, - Author: expectedAuthor, - Changes: expectedChanges, - Pause: false, - }) - if err != nil { - t.Fatal(err) - } - if r.ID != "new_container_id" { - t.Fatalf("expected `new_container_id`, got %s", r.ID) - } -} diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go index 036298c1..d706260c 100644 --- a/vendor/github.com/docker/docker/client/container_copy.go +++ b/vendor/github.com/docker/docker/client/container_copy.go @@ -1,6 +1,7 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/base64" "encoding/json" "fmt" @@ -10,8 +11,6 @@ import ( "path/filepath" "strings" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" ) diff --git a/vendor/github.com/docker/docker/client/container_copy_test.go b/vendor/github.com/docker/docker/client/container_copy_test.go deleted file mode 100644 index 8fc3445d..00000000 --- a/vendor/github.com/docker/docker/client/container_copy_test.go +++ /dev/null @@ -1,274 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" -) - -func TestContainerStatPathError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerStatPath(context.Background(), "container_id", "path") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server error, got %v", err) - } -} - -func TestContainerStatPathNotFoundError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "Not found")), - } - _, err := client.ContainerStatPath(context.Background(), "container_id", "path") - if !IsErrNotFound(err) { - t.Fatalf("expected a not found error, got %v", err) - } -} - -func TestContainerStatPathNoHeaderError(t *testing.T) { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - _, err := client.ContainerStatPath(context.Background(), "container_id", "path/to/file") - if err == nil { - t.Fatalf("expected an error, got nothing") - } -} - -func TestContainerStatPath(t *testing.T) { - expectedURL := "/containers/container_id/archive" - expectedPath := "path/to/file" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "HEAD" { - return nil, fmt.Errorf("expected HEAD method, got %s", req.Method) - } - query := 
req.URL.Query() - path := query.Get("path") - if path != expectedPath { - return nil, fmt.Errorf("path not set in URL query properly") - } - content, err := json.Marshal(types.ContainerPathStat{ - Name: "name", - Mode: 0700, - }) - if err != nil { - return nil, err - } - base64PathStat := base64.StdEncoding.EncodeToString(content) - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - Header: http.Header{ - "X-Docker-Container-Path-Stat": []string{base64PathStat}, - }, - }, nil - }), - } - stat, err := client.ContainerStatPath(context.Background(), "container_id", expectedPath) - if err != nil { - t.Fatal(err) - } - if stat.Name != "name" { - t.Fatalf("expected container path stat name to be 'name', got '%s'", stat.Name) - } - if stat.Mode != 0700 { - t.Fatalf("expected container path stat mode to be 0700, got '%v'", stat.Mode) - } -} - -func TestCopyToContainerError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server error, got %v", err) - } -} - -func TestCopyToContainerNotFoundError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "Not found")), - } - err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{}) - if !IsErrNotFound(err) { - t.Fatalf("expected a not found error, got %v", err) - } -} - -func TestCopyToContainerNotStatusOKError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusNoContent, "No content")), - } - err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{}) - if err == nil || err.Error() != "unexpected status code from daemon: 204" { - t.Fatalf("expected an unexpected status code error, got %v", err) - } -} - -func TestCopyToContainer(t *testing.T) { - expectedURL := "/containers/container_id/archive" - expectedPath := "path/to/file" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "PUT" { - return nil, fmt.Errorf("expected PUT method, got %s", req.Method) - } - query := req.URL.Query() - path := query.Get("path") - if path != expectedPath { - return nil, fmt.Errorf("path not set in URL query properly, expected '%s', got %s", expectedPath, path) - } - noOverwriteDirNonDir := query.Get("noOverwriteDirNonDir") - if noOverwriteDirNonDir != "true" { - return nil, fmt.Errorf("noOverwriteDirNonDir not set in URL query properly, expected true, got %s", noOverwriteDirNonDir) - } - - content, err := ioutil.ReadAll(req.Body) - if err != nil { - return nil, err - } - if err := req.Body.Close(); err != nil { - return nil, err - } - if string(content) != "content" { - return nil, fmt.Errorf("expected content to be 'content', got %s", string(content)) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - err := client.CopyToContainer(context.Background(), "container_id", 
expectedPath, bytes.NewReader([]byte("content")), types.CopyToContainerOptions{ - AllowOverwriteDirWithFile: false, - }) - if err != nil { - t.Fatal(err) - } -} - -func TestCopyFromContainerError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server error, got %v", err) - } -} - -func TestCopyFromContainerNotFoundError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "Not found")), - } - _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") - if !IsErrNotFound(err) { - t.Fatalf("expected a not found error, got %v", err) - } -} - -func TestCopyFromContainerNotStatusOKError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusNoContent, "No content")), - } - _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") - if err == nil || err.Error() != "unexpected status code from daemon: 204" { - t.Fatalf("expected an unexpected status code error, got %v", err) - } -} - -func TestCopyFromContainerNoHeaderError(t *testing.T) { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") - if err == nil { - t.Fatalf("expected an error, got nothing") - } -} - -func TestCopyFromContainer(t *testing.T) { - expectedURL := "/containers/container_id/archive" - expectedPath := "path/to/file" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "GET" { - return nil, fmt.Errorf("expected GET method, got %s", req.Method) - } - query := req.URL.Query() - path := query.Get("path") - if path != expectedPath { - return nil, fmt.Errorf("path not set in URL query properly, expected '%s', got %s", expectedPath, path) - } - - headercontent, err := json.Marshal(types.ContainerPathStat{ - Name: "name", - Mode: 0700, - }) - if err != nil { - return nil, err - } - base64PathStat := base64.StdEncoding.EncodeToString(headercontent) - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("content"))), - Header: http.Header{ - "X-Docker-Container-Path-Stat": []string{base64PathStat}, - }, - }, nil - }), - } - r, stat, err := client.CopyFromContainer(context.Background(), "container_id", expectedPath) - if err != nil { - t.Fatal(err) - } - if stat.Name != "name" { - t.Fatalf("expected container path stat name to be 'name', got '%s'", stat.Name) - } - if stat.Mode != 0700 { - t.Fatalf("expected container path stat mode to be 0700, got '%v'", stat.Mode) - } - content, err := ioutil.ReadAll(r) - if err != nil { - t.Fatal(err) - } - if err := r.Close(); err != nil { - t.Fatal(err) - } - if string(content) != "content" { - t.Fatalf("expected content to be 'content', got %s", string(content)) - } -} diff --git a/vendor/github.com/docker/docker/client/container_create.go 
b/vendor/github.com/docker/docker/client/container_create.go index 0af82a19..d269a618 100644 --- a/vendor/github.com/docker/docker/client/container_create.go +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -1,6 +1,7 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "strings" @@ -8,7 +9,6 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/versions" - "golang.org/x/net/context" ) type configWrapper struct { diff --git a/vendor/github.com/docker/docker/client/container_create_test.go b/vendor/github.com/docker/docker/client/container_create_test.go deleted file mode 100644 index 6ad6fda0..00000000 --- a/vendor/github.com/docker/docker/client/container_create_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types/container" - "golang.org/x/net/context" -) - -func TestContainerCreateError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerCreate(context.Background(), nil, nil, nil, "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error while testing StatusInternalServerError, got %v", err) - } - - // 404 doesn't automatically means an unknown image - client = &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "Server error")), - } - _, err = client.ContainerCreate(context.Background(), nil, nil, nil, "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error while testing StatusNotFound, got %v", err) - } -} - -func TestContainerCreateImageNotFound(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "No such image")), - } - _, err := client.ContainerCreate(context.Background(), &container.Config{Image: "unknown_image"}, nil, nil, "unknown") - if err == nil || !IsErrNotFound(err) { - t.Fatalf("expected an imageNotFound error, got %v", err) - } -} - -func TestContainerCreateWithName(t *testing.T) { - expectedURL := "/containers/create" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - name := req.URL.Query().Get("name") - if name != "container_name" { - return nil, fmt.Errorf("container name not set in URL query properly. Expected `container_name`, got %s", name) - } - b, err := json.Marshal(container.ContainerCreateCreatedBody{ - ID: "container_id", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - r, err := client.ContainerCreate(context.Background(), nil, nil, nil, "container_name") - if err != nil { - t.Fatal(err) - } - if r.ID != "container_id" { - t.Fatalf("expected `container_id`, got %s", r.ID) - } -} - -// TestContainerCreateAutoRemove validates that a client using API 1.24 always disables AutoRemove. When using API 1.25 -// or up, AutoRemove should not be disabled. 
-func TestContainerCreateAutoRemove(t *testing.T) { - autoRemoveValidator := func(expectedValue bool) func(req *http.Request) (*http.Response, error) { - return func(req *http.Request) (*http.Response, error) { - var config configWrapper - - if err := json.NewDecoder(req.Body).Decode(&config); err != nil { - return nil, err - } - if config.HostConfig.AutoRemove != expectedValue { - return nil, fmt.Errorf("expected AutoRemove to be %v, got %v", expectedValue, config.HostConfig.AutoRemove) - } - b, err := json.Marshal(container.ContainerCreateCreatedBody{ - ID: "container_id", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - } - } - - client := &Client{ - client: newMockClient(autoRemoveValidator(false)), - version: "1.24", - } - if _, err := client.ContainerCreate(context.Background(), nil, &container.HostConfig{AutoRemove: true}, nil, ""); err != nil { - t.Fatal(err) - } - client = &Client{ - client: newMockClient(autoRemoveValidator(true)), - version: "1.25", - } - if _, err := client.ContainerCreate(context.Background(), nil, &container.HostConfig{AutoRemove: true}, nil, ""); err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go index f03ebf1d..3b7c90c9 100644 --- a/vendor/github.com/docker/docker/client/container_diff.go +++ b/vendor/github.com/docker/docker/client/container_diff.go @@ -1,11 +1,11 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types/container" - "golang.org/x/net/context" ) // ContainerDiff shows differences in a container filesystem since it was started. 
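Editor's note: like most hunks in this vendor bump, the container_diff.go change above only replaces the golang.org/x/net/context import with the standard library context package; the ContainerDiff API itself is unchanged. As a hedged illustration only (not part of the diff — the client construction, container ID, and printed output are assumptions), a consumer of the vendored client now calls it with a stdlib context:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	// NewEnvClient is the constructor used by the examples in this vendored package.
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	// ContainerDiff now accepts a standard-library context.Context.
	changes, err := cli.ContainerDiff(context.Background(), "container_id")
	if err != nil {
		log.Fatal(err)
	}
	for _, change := range changes {
		// Each item reports the kind of filesystem change and the affected path.
		fmt.Printf("kind=%d path=%s\n", change.Kind, change.Path)
	}
}

The same stdlib-context migration applies to every client method touched in the remaining hunks of this diff.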
diff --git a/vendor/github.com/docker/docker/client/container_diff_test.go b/vendor/github.com/docker/docker/client/container_diff_test.go deleted file mode 100644 index 5246f060..00000000 --- a/vendor/github.com/docker/docker/client/container_diff_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types/container" - "golang.org/x/net/context" -) - -func TestContainerDiffError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerDiff(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } - -} - -func TestContainerDiff(t *testing.T) { - expectedURL := "/containers/container_id/changes" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - b, err := json.Marshal([]container.ContainerChangeResponseItem{ - { - Kind: 0, - Path: "/path/1", - }, - { - Kind: 1, - Path: "/path/2", - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - changes, err := client.ContainerDiff(context.Background(), "container_id") - if err != nil { - t.Fatal(err) - } - if len(changes) != 2 { - t.Fatalf("expected an array of 2 changes, got %v", changes) - } -} diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go index 59db8a90..535536b1 100644 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -1,10 +1,10 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerExecCreate creates a new exec configuration to run an exec process. 
diff --git a/vendor/github.com/docker/docker/client/container_exec_test.go b/vendor/github.com/docker/docker/client/container_exec_test.go deleted file mode 100644 index 13cf8898..00000000 --- a/vendor/github.com/docker/docker/client/container_exec_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" -) - -func TestContainerExecCreateError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerExecCreate(t *testing.T) { - expectedURL := "/containers/container_id/exec" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - // FIXME validate the content is the given ExecConfig ? - if err := req.ParseForm(); err != nil { - return nil, err - } - execConfig := &types.ExecConfig{} - if err := json.NewDecoder(req.Body).Decode(execConfig); err != nil { - return nil, err - } - if execConfig.User != "user" { - return nil, fmt.Errorf("expected an execConfig with User == 'user', got %v", execConfig) - } - b, err := json.Marshal(types.IDResponse{ - ID: "exec_id", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - r, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{ - User: "user", - }) - if err != nil { - t.Fatal(err) - } - if r.ID != "exec_id" { - t.Fatalf("expected `exec_id`, got %s", r.ID) - } -} - -func TestContainerExecStartError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerExecStart(context.Background(), "nothing", types.ExecStartCheck{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerExecStart(t *testing.T) { - expectedURL := "/exec/exec_id/start" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if err := req.ParseForm(); err != nil { - return nil, err - } - execStartCheck := &types.ExecStartCheck{} - if err := json.NewDecoder(req.Body).Decode(execStartCheck); err != nil { - return nil, err - } - if execStartCheck.Tty || !execStartCheck.Detach { - return nil, fmt.Errorf("expected execStartCheck{Detach:true,Tty:false}, got %v", execStartCheck) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.ContainerExecStart(context.Background(), "exec_id", types.ExecStartCheck{ - Detach: true, - Tty: false, - }) - if err != nil { - t.Fatal(err) - } -} - -func 
TestContainerExecInspectError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerExecInspect(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerExecInspect(t *testing.T) { - expectedURL := "/exec/exec_id/json" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - b, err := json.Marshal(types.ContainerExecInspect{ - ExecID: "exec_id", - ContainerID: "container_id", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - inspect, err := client.ContainerExecInspect(context.Background(), "exec_id") - if err != nil { - t.Fatal(err) - } - if inspect.ExecID != "exec_id" { - t.Fatalf("expected ExecID to be `exec_id`, got %s", inspect.ExecID) - } - if inspect.ContainerID != "container_id" { - t.Fatalf("expected ContainerID `container_id`, got %s", inspect.ContainerID) - } -} diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go index f4c2bee3..d0c0a5cb 100644 --- a/vendor/github.com/docker/docker/client/container_export.go +++ b/vendor/github.com/docker/docker/client/container_export.go @@ -1,10 +1,9 @@ package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" - - "golang.org/x/net/context" ) // ContainerExport retrieves the raw contents of a container diff --git a/vendor/github.com/docker/docker/client/container_export_test.go b/vendor/github.com/docker/docker/client/container_export_test.go deleted file mode 100644 index 5d41a45e..00000000 --- a/vendor/github.com/docker/docker/client/container_export_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestContainerExportError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerExport(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerExport(t *testing.T) { - expectedURL := "/containers/container_id/export" - client := &Client{ - client: newMockClient(func(r *http.Request) (*http.Response, error) { - if !strings.HasPrefix(r.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), - }, nil - }), - } - body, err := client.ContainerExport(context.Background(), "container_id") - if err != nil { - t.Fatal(err) - } - defer body.Close() - content, err := ioutil.ReadAll(body) - if err != nil { - t.Fatal(err) - } - if string(content) != "response" { - t.Fatalf("expected response to contain 'response', got %s", string(content)) - } -} diff --git a/vendor/github.com/docker/docker/client/container_inspect.go 
b/vendor/github.com/docker/docker/client/container_inspect.go index d1dbd474..f453064c 100644 --- a/vendor/github.com/docker/docker/client/container_inspect.go +++ b/vendor/github.com/docker/docker/client/container_inspect.go @@ -2,12 +2,12 @@ package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerInspect returns the container information. diff --git a/vendor/github.com/docker/docker/client/container_inspect_test.go b/vendor/github.com/docker/docker/client/container_inspect_test.go deleted file mode 100644 index eeb903e4..00000000 --- a/vendor/github.com/docker/docker/client/container_inspect_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -func TestContainerInspectError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.ContainerInspect(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerInspectContainerNotFound(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "Server error")), - } - - _, err := client.ContainerInspect(context.Background(), "unknown") - if err == nil || !IsErrNotFound(err) { - t.Fatalf("expected a containerNotFound error, got %v", err) - } -} - -func TestContainerInspectWithEmptyID(t *testing.T) { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - return nil, errors.New("should not make request") - }), - } - _, _, err := client.ContainerInspectWithRaw(context.Background(), "", true) - if !IsErrNotFound(err) { - t.Fatalf("Expected NotFoundError, got %v", err) - } -} - -func TestContainerInspect(t *testing.T) { - expectedURL := "/containers/container_id/json" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - content, err := json.Marshal(types.ContainerJSON{ - ContainerJSONBase: &types.ContainerJSONBase{ - ID: "container_id", - Image: "image", - Name: "name", - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - r, err := client.ContainerInspect(context.Background(), "container_id") - if err != nil { - t.Fatal(err) - } - if r.ID != "container_id" { - t.Fatalf("expected `container_id`, got %s", r.ID) - } - if r.Image != "image" { - t.Fatalf("expected `image`, got %s", r.Image) - } - if r.Name != "name" { - t.Fatalf("expected `name`, got %s", r.Name) - } -} - -func TestContainerInspectNode(t *testing.T) { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - content, err := json.Marshal(types.ContainerJSON{ - ContainerJSONBase: &types.ContainerJSONBase{ - ID: "container_id", - Image: "image", - Name: "name", - Node: &types.ContainerNode{ - ID: "container_node_id", - Addr: "container_node", - Labels: 
map[string]string{"foo": "bar"}, - }, - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - r, err := client.ContainerInspect(context.Background(), "container_id") - if err != nil { - t.Fatal(err) - } - if r.ID != "container_id" { - t.Fatalf("expected `container_id`, got %s", r.ID) - } - if r.Image != "image" { - t.Fatalf("expected `image`, got %s", r.Image) - } - if r.Name != "name" { - t.Fatalf("expected `name`, got %s", r.Name) - } - if r.Node.ID != "container_node_id" { - t.Fatalf("expected `container_node_id`, got %s", r.Node.ID) - } - if r.Node.Addr != "container_node" { - t.Fatalf("expected `container_node`, got %s", r.Node.Addr) - } - foo, ok := r.Node.Labels["foo"] - if foo != "bar" || !ok { - t.Fatalf("expected `bar` for label `foo`") - } -} diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go index c5a9fe75..4d6f1d23 100644 --- a/vendor/github.com/docker/docker/client/container_kill.go +++ b/vendor/github.com/docker/docker/client/container_kill.go @@ -1,9 +1,8 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net/url" - - "golang.org/x/net/context" ) // ContainerKill terminates the container process but does not remove the container from the docker host. diff --git a/vendor/github.com/docker/docker/client/container_kill_test.go b/vendor/github.com/docker/docker/client/container_kill_test.go deleted file mode 100644 index a4a1bfed..00000000 --- a/vendor/github.com/docker/docker/client/container_kill_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestContainerKillError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerKill(context.Background(), "nothing", "SIGKILL") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerKill(t *testing.T) { - expectedURL := "/containers/container_id/kill" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - signal := req.URL.Query().Get("signal") - if signal != "SIGKILL" { - return nil, fmt.Errorf("signal not set in URL query properly. 
Expected 'SIGKILL', got %s", signal) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.ContainerKill(context.Background(), "container_id", "SIGKILL") - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go index cd2e1383..9c218e22 100644 --- a/vendor/github.com/docker/docker/client/container_list.go +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -1,13 +1,13 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "strconv" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // ContainerList returns the list of containers in the docker host. diff --git a/vendor/github.com/docker/docker/client/container_list_test.go b/vendor/github.com/docker/docker/client/container_list_test.go deleted file mode 100644 index 5ca4c840..00000000 --- a/vendor/github.com/docker/docker/client/container_list_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" -) - -func TestContainerListError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerList(context.Background(), types.ContainerListOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerList(t *testing.T) { - expectedURL := "/containers/json" - expectedFilters := `{"before":{"container":true},"label":{"label1":true,"label2":true}}` - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - all := query.Get("all") - if all != "1" { - return nil, fmt.Errorf("all not set in URL query properly. Expected '1', got %s", all) - } - limit := query.Get("limit") - if limit != "0" { - return nil, fmt.Errorf("limit should have not be present in query. Expected '0', got %s", limit) - } - since := query.Get("since") - if since != "container" { - return nil, fmt.Errorf("since not set in URL query properly. Expected 'container', got %s", since) - } - before := query.Get("before") - if before != "" { - return nil, fmt.Errorf("before should have not be present in query, go %s", before) - } - size := query.Get("size") - if size != "1" { - return nil, fmt.Errorf("size not set in URL query properly. 
Expected '1', got %s", size) - } - filters := query.Get("filters") - if filters != expectedFilters { - return nil, fmt.Errorf("expected filters incoherent '%v' with actual filters %v", expectedFilters, filters) - } - - b, err := json.Marshal([]types.Container{ - { - ID: "container_id1", - }, - { - ID: "container_id2", - }, - }) - if err != nil { - return nil, err - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - filters := filters.NewArgs() - filters.Add("label", "label1") - filters.Add("label", "label2") - filters.Add("before", "container") - containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{ - Size: true, - All: true, - Since: "container", - Filters: filters, - }) - if err != nil { - t.Fatal(err) - } - if len(containers) != 2 { - t.Fatalf("expected 2 containers, got %v", containers) - } -} diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go index 01d88a32..5b6541f0 100644 --- a/vendor/github.com/docker/docker/client/container_logs.go +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -1,14 +1,14 @@ package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" "time" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" ) // ContainerLogs returns the logs generated by a container in an io.ReadCloser. @@ -46,7 +46,7 @@ func (cli *Client) ContainerLogs(ctx context.Context, container string, options if options.Since != "" { ts, err := timetypes.GetTimestamp(options.Since, time.Now()) if err != nil { - return nil, err + return nil, errors.Wrap(err, `invalid value for "since"`) } query.Set("since", ts) } @@ -54,7 +54,7 @@ func (cli *Client) ContainerLogs(ctx context.Context, container string, options if options.Until != "" { ts, err := timetypes.GetTimestamp(options.Until, time.Now()) if err != nil { - return nil, err + return nil, errors.Wrap(err, `invalid value for "until"`) } query.Set("until", ts) } diff --git a/vendor/github.com/docker/docker/client/container_logs_test.go b/vendor/github.com/docker/docker/client/container_logs_test.go deleted file mode 100644 index 2ad76df6..00000000 --- a/vendor/github.com/docker/docker/client/container_logs_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "os" - "strings" - "testing" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/internal/testutil" - - "golang.org/x/net/context" -) - -func TestContainerLogsNotFoundError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "Not found")), - } - _, err := client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{}) - if !IsErrNotFound(err) { - t.Fatalf("expected a not found error, got %v", err) - } -} - -func TestContainerLogsError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } - _, err = client.ContainerLogs(context.Background(), "container_id", 
types.ContainerLogsOptions{ - Since: "2006-01-02TZ", - }) - testutil.ErrorContains(t, err, `parsing time "2006-01-02TZ"`) - _, err = client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{ - Until: "2006-01-02TZ", - }) - testutil.ErrorContains(t, err, `parsing time "2006-01-02TZ"`) -} - -func TestContainerLogs(t *testing.T) { - expectedURL := "/containers/container_id/logs" - cases := []struct { - options types.ContainerLogsOptions - expectedQueryParams map[string]string - }{ - { - expectedQueryParams: map[string]string{ - "tail": "", - }, - }, - { - options: types.ContainerLogsOptions{ - Tail: "any", - }, - expectedQueryParams: map[string]string{ - "tail": "any", - }, - }, - { - options: types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - Timestamps: true, - Details: true, - Follow: true, - }, - expectedQueryParams: map[string]string{ - "tail": "", - "stdout": "1", - "stderr": "1", - "timestamps": "1", - "details": "1", - "follow": "1", - }, - }, - { - options: types.ContainerLogsOptions{ - // An complete invalid date, timestamp or go duration will be - // passed as is - Since: "invalid but valid", - }, - expectedQueryParams: map[string]string{ - "tail": "", - "since": "invalid but valid", - }, - }, - { - options: types.ContainerLogsOptions{ - // An complete invalid date, timestamp or go duration will be - // passed as is - Until: "invalid but valid", - }, - expectedQueryParams: map[string]string{ - "tail": "", - "until": "invalid but valid", - }, - }, - } - for _, logCase := range cases { - client := &Client{ - client: newMockClient(func(r *http.Request) (*http.Response, error) { - if !strings.HasPrefix(r.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) - } - // Check query parameters - query := r.URL.Query() - for key, expected := range logCase.expectedQueryParams { - actual := query.Get(key) - if actual != expected { - return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) - } - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), - }, nil - }), - } - body, err := client.ContainerLogs(context.Background(), "container_id", logCase.options) - if err != nil { - t.Fatal(err) - } - defer body.Close() - content, err := ioutil.ReadAll(body) - if err != nil { - t.Fatal(err) - } - if string(content) != "response" { - t.Fatalf("expected response to contain 'response', got %s", string(content)) - } - } -} - -func ExampleClient_ContainerLogs_withTimeout() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - client, _ := NewEnvClient() - reader, err := client.ContainerLogs(ctx, "container_id", types.ContainerLogsOptions{}) - if err != nil { - log.Fatal(err) - } - - _, err = io.Copy(os.Stdout, reader) - if err != nil && err != io.EOF { - log.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go index fa0a9011..5e7271a3 100644 --- a/vendor/github.com/docker/docker/client/container_pause.go +++ b/vendor/github.com/docker/docker/client/container_pause.go @@ -1,6 +1,6 @@ package client // import "github.com/docker/docker/client" -import "golang.org/x/net/context" +import "context" // ContainerPause pauses the main process of a given container without terminating it. 
func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { diff --git a/vendor/github.com/docker/docker/client/container_pause_test.go b/vendor/github.com/docker/docker/client/container_pause_test.go deleted file mode 100644 index ccf619c4..00000000 --- a/vendor/github.com/docker/docker/client/container_pause_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestContainerPauseError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerPause(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerPause(t *testing.T) { - expectedURL := "/containers/container_id/pause" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - err := client.ContainerPause(context.Background(), "container_id") - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go index 4129841d..14f88d93 100644 --- a/vendor/github.com/docker/docker/client/container_prune.go +++ b/vendor/github.com/docker/docker/client/container_prune.go @@ -1,12 +1,12 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // ContainersPrune requests the daemon to delete unused data diff --git a/vendor/github.com/docker/docker/client/container_prune_test.go b/vendor/github.com/docker/docker/client/container_prune_test.go deleted file mode 100644 index 7ffd9c7e..00000000 --- a/vendor/github.com/docker/docker/client/container_prune_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "golang.org/x/net/context" -) - -func TestContainersPruneError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - version: "1.25", - } - - filters := filters.NewArgs() - - _, err := client.ContainersPrune(context.Background(), filters) - assert.Check(t, is.Error(err, "Error response from daemon: Server error")) -} - -func TestContainersPrune(t *testing.T) { - expectedURL := "/v1.25/containers/prune" - - danglingFilters := filters.NewArgs() - danglingFilters.Add("dangling", "true") - - noDanglingFilters := filters.NewArgs() - noDanglingFilters.Add("dangling", "false") - - danglingUntilFilters := filters.NewArgs() - danglingUntilFilters.Add("dangling", "true") - danglingUntilFilters.Add("until", "2016-12-15T14:00") - - labelFilters := filters.NewArgs() - 
labelFilters.Add("dangling", "true") - labelFilters.Add("label", "label1=foo") - labelFilters.Add("label", "label2!=bar") - - listCases := []struct { - filters filters.Args - expectedQueryParams map[string]string - }{ - { - filters: filters.Args{}, - expectedQueryParams: map[string]string{ - "until": "", - "filter": "", - "filters": "", - }, - }, - { - filters: danglingFilters, - expectedQueryParams: map[string]string{ - "until": "", - "filter": "", - "filters": `{"dangling":{"true":true}}`, - }, - }, - { - filters: danglingUntilFilters, - expectedQueryParams: map[string]string{ - "until": "", - "filter": "", - "filters": `{"dangling":{"true":true},"until":{"2016-12-15T14:00":true}}`, - }, - }, - { - filters: noDanglingFilters, - expectedQueryParams: map[string]string{ - "until": "", - "filter": "", - "filters": `{"dangling":{"false":true}}`, - }, - }, - { - filters: labelFilters, - expectedQueryParams: map[string]string{ - "until": "", - "filter": "", - "filters": `{"dangling":{"true":true},"label":{"label1=foo":true,"label2!=bar":true}}`, - }, - }, - } - for _, listCase := range listCases { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - for key, expected := range listCase.expectedQueryParams { - actual := query.Get(key) - assert.Check(t, is.Equal(expected, actual)) - } - content, err := json.Marshal(types.ContainersPruneReport{ - ContainersDeleted: []string{"container_id1", "container_id2"}, - SpaceReclaimed: 9999, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - version: "1.25", - } - - report, err := client.ContainersPrune(context.Background(), listCase.filters) - assert.Check(t, err) - assert.Check(t, is.Len(report.ContainersDeleted, 2)) - assert.Check(t, is.Equal(uint64(9999), report.SpaceReclaimed)) - } -} diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go index c7829143..ab4cfc16 100644 --- a/vendor/github.com/docker/docker/client/container_remove.go +++ b/vendor/github.com/docker/docker/client/container_remove.go @@ -1,10 +1,10 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerRemove kills and removes a container from the docker host. 
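Editor's note: container_remove.go above is again only the context import migration. For reference, a short sketch of the call shape, using the RemoveVolumes/Force options the deleted test covered; the helper name and container ID are assumptions:

package example

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// removeContainer force-removes a container and deletes its anonymous volumes,
// the same options the deleted unit test exercised.
func removeContainer(ctx context.Context, cli *client.Client, containerID string) error {
	return cli.ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{
		RemoveVolumes: true,
		Force:         true,
	})
}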
diff --git a/vendor/github.com/docker/docker/client/container_remove_test.go b/vendor/github.com/docker/docker/client/container_remove_test.go deleted file mode 100644 index 537272cd..00000000 --- a/vendor/github.com/docker/docker/client/container_remove_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "golang.org/x/net/context" -) - -func TestContainerRemoveError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{}) - assert.Check(t, is.Error(err, "Error response from daemon: Server error")) -} - -func TestContainerRemoveNotFoundError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "missing")), - } - err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{}) - assert.Check(t, is.Error(err, "Error: No such container: container_id")) - assert.Check(t, IsErrNotFound(err)) -} - -func TestContainerRemove(t *testing.T) { - expectedURL := "/containers/container_id" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - volume := query.Get("v") - if volume != "1" { - return nil, fmt.Errorf("v (volume) not set in URL query properly. Expected '1', got %s", volume) - } - force := query.Get("force") - if force != "1" { - return nil, fmt.Errorf("force not set in URL query properly. Expected '1', got %s", force) - } - link := query.Get("link") - if link != "" { - return nil, fmt.Errorf("link should have not be present in query, go %s", link) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{ - RemoveVolumes: true, - Force: true, - }) - assert.Check(t, err) -} diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go index ad43e398..240fdf55 100644 --- a/vendor/github.com/docker/docker/client/container_rename.go +++ b/vendor/github.com/docker/docker/client/container_rename.go @@ -1,9 +1,8 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net/url" - - "golang.org/x/net/context" ) // ContainerRename changes the name of a given container. 
diff --git a/vendor/github.com/docker/docker/client/container_rename_test.go b/vendor/github.com/docker/docker/client/container_rename_test.go deleted file mode 100644 index 9b5ca7fb..00000000 --- a/vendor/github.com/docker/docker/client/container_rename_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestContainerRenameError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerRename(context.Background(), "nothing", "newNothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerRename(t *testing.T) { - expectedURL := "/containers/container_id/rename" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - name := req.URL.Query().Get("name") - if name != "newName" { - return nil, fmt.Errorf("name not set in URL query properly. Expected 'newName', got %s", name) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.ContainerRename(context.Background(), "container_id", "newName") - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go index 8f1244e2..a9d4c0c7 100644 --- a/vendor/github.com/docker/docker/client/container_resize.go +++ b/vendor/github.com/docker/docker/client/container_resize.go @@ -1,11 +1,11 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "strconv" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerResize changes the size of the tty for a container. 
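Editor's note: container_resize.go above receives the same context migration; both resize endpoints take a types.ResizeOptions. A hedged sketch covering the container and exec variants (helper name, IDs, and dimensions are illustrative):

package example

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// resizeTTY adjusts the pseudo-terminal size for a container and for an exec instance.
func resizeTTY(ctx context.Context, cli *client.Client, containerID, execID string) error {
	size := types.ResizeOptions{Height: 24, Width: 80}
	if err := cli.ContainerResize(ctx, containerID, size); err != nil {
		return err
	}
	return cli.ContainerExecResize(ctx, execID, size)
}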
diff --git a/vendor/github.com/docker/docker/client/container_resize_test.go b/vendor/github.com/docker/docker/client/container_resize_test.go deleted file mode 100644 index d5f4363c..00000000 --- a/vendor/github.com/docker/docker/client/container_resize_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -func TestContainerResizeError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerResize(context.Background(), "container_id", types.ResizeOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerExecResizeError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerExecResize(context.Background(), "exec_id", types.ResizeOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerResize(t *testing.T) { - client := &Client{ - client: newMockClient(resizeTransport("/containers/container_id/resize")), - } - - err := client.ContainerResize(context.Background(), "container_id", types.ResizeOptions{ - Height: 500, - Width: 600, - }) - if err != nil { - t.Fatal(err) - } -} - -func TestContainerExecResize(t *testing.T) { - client := &Client{ - client: newMockClient(resizeTransport("/exec/exec_id/resize")), - } - - err := client.ContainerExecResize(context.Background(), "exec_id", types.ResizeOptions{ - Height: 500, - Width: 600, - }) - if err != nil { - t.Fatal(err) - } -} - -func resizeTransport(expectedURL string) func(req *http.Request) (*http.Response, error) { - return func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - h := query.Get("h") - if h != "500" { - return nil, fmt.Errorf("h not set in URL query properly. Expected '500', got %s", h) - } - w := query.Get("w") - if w != "600" { - return nil, fmt.Errorf("w not set in URL query properly. Expected '600', got %s", w) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - } -} diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go index 8c8085dc..41e42196 100644 --- a/vendor/github.com/docker/docker/client/container_restart.go +++ b/vendor/github.com/docker/docker/client/container_restart.go @@ -1,11 +1,11 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "time" timetypes "github.com/docker/docker/api/types/time" - "golang.org/x/net/context" ) // ContainerRestart stops and starts a container again. 
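Editor's note: container_restart.go above is the same context migration. Its timeout argument is a *time.Duration, so callers opt into a graceful-stop window by passing a pointer; a small illustrative sketch (helper name, container ID, and duration are assumptions):

package example

import (
	"context"
	"time"

	"github.com/docker/docker/client"
)

// restartWithTimeout restarts a container, giving it up to ten seconds to stop
// gracefully before the daemon kills it. Passing nil instead of &timeout leaves
// the stop timeout to the daemon's defaults.
func restartWithTimeout(ctx context.Context, cli *client.Client, containerID string) error {
	timeout := 10 * time.Second
	return cli.ContainerRestart(ctx, containerID, &timeout)
}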
diff --git a/vendor/github.com/docker/docker/client/container_restart_test.go b/vendor/github.com/docker/docker/client/container_restart_test.go deleted file mode 100644 index f20d9699..00000000 --- a/vendor/github.com/docker/docker/client/container_restart_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - "time" - - "golang.org/x/net/context" -) - -func TestContainerRestartError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - timeout := 0 * time.Second - err := client.ContainerRestart(context.Background(), "nothing", &timeout) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerRestart(t *testing.T) { - expectedURL := "/containers/container_id/restart" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - t := req.URL.Query().Get("t") - if t != "100" { - return nil, fmt.Errorf("t (timeout) not set in URL query properly. Expected '100', got %s", t) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - timeout := 100 * time.Second - err := client.ContainerRestart(context.Background(), "container_id", &timeout) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go index d06b6370..c2e0b15d 100644 --- a/vendor/github.com/docker/docker/client/container_start.go +++ b/vendor/github.com/docker/docker/client/container_start.go @@ -1,10 +1,9 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net/url" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" ) diff --git a/vendor/github.com/docker/docker/client/container_start_test.go b/vendor/github.com/docker/docker/client/container_start_test.go deleted file mode 100644 index 89018a80..00000000 --- a/vendor/github.com/docker/docker/client/container_start_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" -) - -func TestContainerStartError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerStart(context.Background(), "nothing", types.ContainerStartOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerStart(t *testing.T) { - expectedURL := "/containers/container_id/start" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - // we're not expecting any payload, but if one is supplied, check it is valid. 
- if req.Header.Get("Content-Type") == "application/json" { - var startConfig interface{} - if err := json.NewDecoder(req.Body).Decode(&startConfig); err != nil { - return nil, fmt.Errorf("Unable to parse json: %s", err) - } - } - - checkpoint := req.URL.Query().Get("checkpoint") - if checkpoint != "checkpoint_id" { - return nil, fmt.Errorf("checkpoint not set in URL query properly. Expected 'checkpoint_id', got %s", checkpoint) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.ContainerStart(context.Background(), "container_id", types.ContainerStartOptions{CheckpointID: "checkpoint_id"}) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go index a7a2b8ec..6ef44c77 100644 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -1,10 +1,10 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerStats returns near realtime stats for a given container. diff --git a/vendor/github.com/docker/docker/client/container_stats_test.go b/vendor/github.com/docker/docker/client/container_stats_test.go deleted file mode 100644 index e16fcafb..00000000 --- a/vendor/github.com/docker/docker/client/container_stats_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestContainerStatsError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerStats(context.Background(), "nothing", false) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerStats(t *testing.T) { - expectedURL := "/containers/container_id/stats" - cases := []struct { - stream bool - expectedStream string - }{ - { - expectedStream: "0", - }, - { - stream: true, - expectedStream: "1", - }, - } - for _, c := range cases { - client := &Client{ - client: newMockClient(func(r *http.Request) (*http.Response, error) { - if !strings.HasPrefix(r.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) - } - - query := r.URL.Query() - stream := query.Get("stream") - if stream != c.expectedStream { - return nil, fmt.Errorf("stream not set in URL query properly. 
Expected '%s', got %s", c.expectedStream, stream) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), - }, nil - }), - } - resp, err := client.ContainerStats(context.Background(), "container_id", c.stream) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - content, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - if string(content) != "response" { - t.Fatalf("expected response to contain 'response', got %s", string(content)) - } - } -} diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go index ca316666..629d7ab6 100644 --- a/vendor/github.com/docker/docker/client/container_stop.go +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -1,15 +1,20 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "time" timetypes "github.com/docker/docker/api/types/time" - "golang.org/x/net/context" ) -// ContainerStop stops a container without terminating the process. -// The process is blocked until the container stops or the timeout expires. +// ContainerStop stops a container. In case the container fails to stop +// gracefully within a time frame specified by the timeout argument, +// it is forcefully terminated (killed). +// +// If the timeout is nil, the container's StopTimeout value is used, if set, +// otherwise the engine default. A negative timeout value can be specified, +// meaning no timeout, i.e. no forceful termination is performed. func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { query := url.Values{} if timeout != nil { diff --git a/vendor/github.com/docker/docker/client/container_stop_test.go b/vendor/github.com/docker/docker/client/container_stop_test.go deleted file mode 100644 index 101625b0..00000000 --- a/vendor/github.com/docker/docker/client/container_stop_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - "time" - - "golang.org/x/net/context" -) - -func TestContainerStopError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - timeout := 0 * time.Second - err := client.ContainerStop(context.Background(), "nothing", &timeout) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerStop(t *testing.T) { - expectedURL := "/containers/container_id/stop" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - t := req.URL.Query().Get("t") - if t != "100" { - return nil, fmt.Errorf("t (timeout) not set in URL query properly. 
Expected '100', got %s", t) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - timeout := 100 * time.Second - err := client.ContainerStop(context.Background(), "container_id", &timeout) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go index 55841ce9..9c9fce7a 100644 --- a/vendor/github.com/docker/docker/client/container_top.go +++ b/vendor/github.com/docker/docker/client/container_top.go @@ -1,12 +1,12 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "strings" "github.com/docker/docker/api/types/container" - "golang.org/x/net/context" ) // ContainerTop shows process information from within a container. diff --git a/vendor/github.com/docker/docker/client/container_top_test.go b/vendor/github.com/docker/docker/client/container_top_test.go deleted file mode 100644 index 24f54613..00000000 --- a/vendor/github.com/docker/docker/client/container_top_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "reflect" - "strings" - "testing" - - "github.com/docker/docker/api/types/container" - "golang.org/x/net/context" -) - -func TestContainerTopError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerTop(context.Background(), "nothing", []string{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerTop(t *testing.T) { - expectedURL := "/containers/container_id/top" - expectedProcesses := [][]string{ - {"p1", "p2"}, - {"p3"}, - } - expectedTitles := []string{"title1", "title2"} - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - args := query.Get("ps_args") - if args != "arg1 arg2" { - return nil, fmt.Errorf("args not set in URL query properly. 
Expected 'arg1 arg2', got %v", args) - } - - b, err := json.Marshal(container.ContainerTopOKBody{ - Processes: [][]string{ - {"p1", "p2"}, - {"p3"}, - }, - Titles: []string{"title1", "title2"}, - }) - if err != nil { - return nil, err - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - processList, err := client.ContainerTop(context.Background(), "container_id", []string{"arg1", "arg2"}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(expectedProcesses, processList.Processes) { - t.Fatalf("Processes: expected %v, got %v", expectedProcesses, processList.Processes) - } - if !reflect.DeepEqual(expectedTitles, processList.Titles) { - t.Fatalf("Titles: expected %v, got %v", expectedTitles, processList.Titles) - } -} diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go index b75fb0c3..1d8f8731 100644 --- a/vendor/github.com/docker/docker/client/container_unpause.go +++ b/vendor/github.com/docker/docker/client/container_unpause.go @@ -1,6 +1,6 @@ package client // import "github.com/docker/docker/client" -import "golang.org/x/net/context" +import "context" // ContainerUnpause resumes the process execution within a container func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { diff --git a/vendor/github.com/docker/docker/client/container_unpause_test.go b/vendor/github.com/docker/docker/client/container_unpause_test.go deleted file mode 100644 index 0c7fe8cb..00000000 --- a/vendor/github.com/docker/docker/client/container_unpause_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestContainerUnpauseError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerUnpause(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerUnpause(t *testing.T) { - expectedURL := "/containers/container_id/unpause" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - err := client.ContainerUnpause(context.Background(), "container_id") - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go index 218ec90f..14e7f23d 100644 --- a/vendor/github.com/docker/docker/client/container_update.go +++ b/vendor/github.com/docker/docker/client/container_update.go @@ -1,10 +1,10 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types/container" - "golang.org/x/net/context" ) // ContainerUpdate updates resources of a container diff --git a/vendor/github.com/docker/docker/client/container_update_test.go b/vendor/github.com/docker/docker/client/container_update_test.go deleted file mode 100644 index 9f7c951c..00000000 --- 
a/vendor/github.com/docker/docker/client/container_update_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types/container" - "golang.org/x/net/context" -) - -func TestContainerUpdateError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerUpdate(context.Background(), "nothing", container.UpdateConfig{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerUpdate(t *testing.T) { - expectedURL := "/containers/container_id/update" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - - b, err := json.Marshal(container.ContainerUpdateOKBody{}) - if err != nil { - return nil, err - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - _, err := client.ContainerUpdate(context.Background(), "container_id", container.UpdateConfig{ - Resources: container.Resources{ - CPUPeriod: 1, - }, - RestartPolicy: container.RestartPolicy{ - Name: "always", - }, - }) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go index 87b0dc1a..6ab8c1da 100644 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -1,11 +1,10 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" - "golang.org/x/net/context" - "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/versions" ) diff --git a/vendor/github.com/docker/docker/client/container_wait_test.go b/vendor/github.com/docker/docker/client/container_wait_test.go deleted file mode 100644 index e25abe64..00000000 --- a/vendor/github.com/docker/docker/client/container_wait_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "log" - "net/http" - "strings" - "testing" - "time" - - "github.com/docker/docker/api/types/container" - - "golang.org/x/net/context" -) - -func TestContainerWaitError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - resultC, errC := client.ContainerWait(context.Background(), "nothing", "") - select { - case result := <-resultC: - t.Fatalf("expected to not get a wait result, got %d", result.StatusCode) - case err := <-errC: - if err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } - } -} - -func TestContainerWait(t *testing.T) { - expectedURL := "/containers/container_id/wait" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - b, err := json.Marshal(container.ContainerWaitOKBody{ - StatusCode: 15, - }) - if err != nil { - return 
nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - resultC, errC := client.ContainerWait(context.Background(), "container_id", "") - select { - case err := <-errC: - t.Fatal(err) - case result := <-resultC: - if result.StatusCode != 15 { - t.Fatalf("expected a status code equal to '15', got %d", result.StatusCode) - } - } -} - -func ExampleClient_ContainerWait_withTimeout() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - client, _ := NewEnvClient() - _, errC := client.ContainerWait(ctx, "container_id", "") - if err := <-errC; err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go index 831fd40e..8eb30eb5 100644 --- a/vendor/github.com/docker/docker/client/disk_usage.go +++ b/vendor/github.com/docker/docker/client/disk_usage.go @@ -1,11 +1,11 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // DiskUsage requests the current data usage from the daemon diff --git a/vendor/github.com/docker/docker/client/disk_usage_test.go b/vendor/github.com/docker/docker/client/disk_usage_test.go deleted file mode 100644 index 615c1e64..00000000 --- a/vendor/github.com/docker/docker/client/disk_usage_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -func TestDiskUsageError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.DiskUsage(context.Background()) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestDiskUsage(t *testing.T) { - expectedURL := "/system/df" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - - du := types.DiskUsage{ - LayersSize: int64(100), - Images: nil, - Containers: nil, - Volumes: nil, - } - - b, err := json.Marshal(du) - if err != nil { - return nil, err - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - if _, err := client.DiskUsage(context.Background()); err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go index 4644ea46..7245bbee 100644 --- a/vendor/github.com/docker/docker/client/distribution_inspect.go +++ b/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -1,11 +1,11 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" registrytypes "github.com/docker/docker/api/types/registry" - "golang.org/x/net/context" ) // DistributionInspect returns the image digest with full Manifest diff --git a/vendor/github.com/docker/docker/client/distribution_inspect_test.go b/vendor/github.com/docker/docker/client/distribution_inspect_test.go deleted file mode 100644 index d4124bfa..00000000 
--- a/vendor/github.com/docker/docker/client/distribution_inspect_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/http" - "testing" - - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -func TestDistributionInspectUnsupported(t *testing.T) { - client := &Client{ - version: "1.29", - client: &http.Client{}, - } - _, err := client.DistributionInspect(context.Background(), "foobar:1.0", "") - assert.Check(t, is.Error(err, `"distribution inspect" requires API version 1.30, but the Docker daemon API version is 1.29`)) -} - -func TestDistributionInspectWithEmptyID(t *testing.T) { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - return nil, errors.New("should not make request") - }), - } - _, err := client.DistributionInspect(context.Background(), "", "") - if !IsErrNotFound(err) { - t.Fatalf("Expected NotFoundError, got %v", err) - } -} diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go index 05c12462..0461af32 100644 --- a/vendor/github.com/docker/docker/client/errors.go +++ b/vendor/github.com/docker/docker/client/errors.go @@ -2,7 +2,6 @@ package client // import "github.com/docker/docker/client" import ( "fmt" - "net/http" "github.com/docker/docker/api/types/versions" diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go index d5d4dd8a..6e565389 100644 --- a/vendor/github.com/docker/docker/client/events.go +++ b/vendor/github.com/docker/docker/client/events.go @@ -1,12 +1,11 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "time" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" diff --git a/vendor/github.com/docker/docker/client/events_test.go b/vendor/github.com/docker/docker/client/events_test.go deleted file mode 100644 index 5ea60913..00000000 --- a/vendor/github.com/docker/docker/client/events_test.go +++ /dev/null @@ -1,165 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" -) - -func TestEventsErrorInOptions(t *testing.T) { - errorCases := []struct { - options types.EventsOptions - expectedError string - }{ - { - options: types.EventsOptions{ - Since: "2006-01-02TZ", - }, - expectedError: `parsing time "2006-01-02TZ"`, - }, - { - options: types.EventsOptions{ - Until: "2006-01-02TZ", - }, - expectedError: `parsing time "2006-01-02TZ"`, - }, - } - for _, e := range errorCases { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, errs := client.Events(context.Background(), e.options) - err := <-errs - if err == nil || !strings.Contains(err.Error(), e.expectedError) { - t.Fatalf("expected an error %q, got %v", e.expectedError, err) - } - } -} - -func TestEventsErrorFromServer(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, errs := 
client.Events(context.Background(), types.EventsOptions{}) - err := <-errs - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestEvents(t *testing.T) { - - expectedURL := "/events" - - filters := filters.NewArgs() - filters.Add("type", events.ContainerEventType) - expectedFiltersJSON := fmt.Sprintf(`{"type":{"%s":true}}`, events.ContainerEventType) - - eventsCases := []struct { - options types.EventsOptions - events []events.Message - expectedEvents map[string]bool - expectedQueryParams map[string]string - }{ - { - options: types.EventsOptions{ - Filters: filters, - }, - expectedQueryParams: map[string]string{ - "filters": expectedFiltersJSON, - }, - events: []events.Message{}, - expectedEvents: make(map[string]bool), - }, - { - options: types.EventsOptions{ - Filters: filters, - }, - expectedQueryParams: map[string]string{ - "filters": expectedFiltersJSON, - }, - events: []events.Message{ - { - Type: "container", - ID: "1", - Action: "create", - }, - { - Type: "container", - ID: "2", - Action: "die", - }, - { - Type: "container", - ID: "3", - Action: "create", - }, - }, - expectedEvents: map[string]bool{ - "1": true, - "2": true, - "3": true, - }, - }, - } - - for _, eventsCase := range eventsCases { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - - for key, expected := range eventsCase.expectedQueryParams { - actual := query.Get(key) - if actual != expected { - return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) - } - } - - buffer := new(bytes.Buffer) - - for _, e := range eventsCase.events { - b, _ := json.Marshal(e) - buffer.Write(b) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(buffer), - }, nil - }), - } - - messages, errs := client.Events(context.Background(), eventsCase.options) - - loop: - for { - select { - case err := <-errs: - if err != nil && err != io.EOF { - t.Fatal(err) - } - - break loop - case e := <-messages: - _, ok := eventsCase.expectedEvents[e.ID] - if !ok { - t.Fatalf("event received not expected with action %s & id %s", e.Action, e.ID) - } - } - } - } -} diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index dab3a756..0ac8248f 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -2,36 +2,20 @@ package client // import "github.com/docker/docker/client" import ( "bufio" + "context" "crypto/tls" "fmt" "net" "net/http" "net/http/httputil" "net/url" - "strings" "time" "github.com/docker/docker/api/types" "github.com/docker/go-connections/sockets" "github.com/pkg/errors" - "golang.org/x/net/context" ) -// tlsClientCon holds tls information and a dialed connection. -type tlsClientCon struct { - *tls.Conn - rawConn net.Conn -} - -func (c *tlsClientCon) CloseWrite() error { - // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it - // on its underlying connection. - if conn, ok := c.rawConn.(types.CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - // postHijacked sends a POST request and hijacks the connection. 
func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { bodyEncoded, err := encodeData(body) @@ -46,7 +30,7 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu } req = cli.addHeaders(req, headers) - conn, err := cli.setupHijackConn(req, "tcp") + conn, err := cli.setupHijackConn(ctx, req, "tcp") if err != nil { return types.HijackedResponse{}, err } @@ -54,96 +38,11 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err } -func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { - return tlsDialWithDialer(new(net.Dialer), network, addr, config) -} - -// We need to copy Go's implementation of tls.Dial (pkg/cryptor/tls/tls.go) in -// order to return our custom tlsClientCon struct which holds both the tls.Conn -// object _and_ its underlying raw connection. The rationale for this is that -// we need to be able to close the write end of the connection when attaching, -// which tls.Conn does not provide. -func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { - // We want the Timeout and Deadline values from dialer to cover the - // whole process: TCP connection and TLS handshake. This means that we - // also need to start our own timers now. - timeout := dialer.Timeout - - if !dialer.Deadline.IsZero() { - deadlineTimeout := time.Until(dialer.Deadline) - if timeout == 0 || deadlineTimeout < timeout { - timeout = deadlineTimeout - } - } - - var errChannel chan error - - if timeout != 0 { - errChannel = make(chan error, 2) - time.AfterFunc(timeout, func() { - errChannel <- errors.New("") - }) - } - - proxyDialer, err := sockets.DialerFromEnvironment(dialer) - if err != nil { - return nil, err - } - - rawConn, err := proxyDialer.Dial(network, addr) - if err != nil { - return nil, err - } - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := rawConn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - colonPos := strings.LastIndex(addr, ":") - if colonPos == -1 { - colonPos = len(addr) - } - hostname := addr[:colonPos] - - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - if config.ServerName == "" { - // Make a copy to avoid polluting argument or default. - config = tlsConfigClone(config) - config.ServerName = hostname - } - - conn := tls.Client(rawConn, config) - - if timeout == 0 { - err = conn.Handshake() - } else { - go func() { - errChannel <- conn.Handshake() - }() - - err = <-errChannel - } - - if err != nil { - rawConn.Close() - return nil, err - } - - // This is Docker difference with standard's crypto/tls package: returned a - // wrapper which holds both the TLS and raw connections. - return &tlsClientCon{conn, rawConn}, nil -} - -func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { +// fallbackDial is used when WithDialer() was not called. +// See cli.Dialer(). 
+func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { if tlsConfig != nil && proto != "unix" && proto != "npipe" { - // Notice this isn't Go standard's tls.Dial function - return tlsDial(proto, addr, tlsConfig) + return tls.Dial(proto, addr, tlsConfig) } if proto == "npipe" { return sockets.DialPipe(addr, 32*time.Second) @@ -151,12 +50,13 @@ func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { return net.Dial(proto, addr) } -func (cli *Client) setupHijackConn(req *http.Request, proto string) (net.Conn, error) { +func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, error) { req.Host = cli.addr req.Header.Set("Connection", "Upgrade") req.Header.Set("Upgrade", proto) - conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) + dialer := cli.Dialer() + conn, err := dialer(ctx) if err != nil { return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") } diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go index 97c9301c..9add3c10 100644 --- a/vendor/github.com/docker/docker/client/image_build.go +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -1,6 +1,7 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/base64" "encoding/json" "io" @@ -9,8 +10,6 @@ import ( "strconv" "strings" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" ) @@ -31,12 +30,6 @@ func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, optio } headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) - if options.Platform != "" { - if err := cli.NewVersionError("1.32", "platform"); err != nil { - return types.ImageBuildResponse{}, err - } - query.Set("platform", options.Platform) - } headers.Set("Content-Type", "application/x-tar") serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) @@ -132,7 +125,14 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur query.Set("session", options.SessionID) } if options.Platform != "" { + if err := cli.NewVersionError("1.32", "platform"); err != nil { + return query, err + } query.Set("platform", strings.ToLower(options.Platform)) } + if options.BuildID != "" { + query.Set("buildid", options.BuildID) + } + query.Set("version", string(options.Version)) return query, nil } diff --git a/vendor/github.com/docker/docker/client/image_build_test.go b/vendor/github.com/docker/docker/client/image_build_test.go deleted file mode 100644 index ef329cef..00000000 --- a/vendor/github.com/docker/docker/client/image_build_test.go +++ /dev/null @@ -1,233 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "reflect" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/go-units" -) - -func TestImageBuildError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ImageBuild(context.Background(), nil, types.ImageBuildOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestImageBuild(t *testing.T) { - 
v1 := "value1" - v2 := "value2" - emptyRegistryConfig := "bnVsbA==" - buildCases := []struct { - buildOptions types.ImageBuildOptions - expectedQueryParams map[string]string - expectedTags []string - expectedRegistryConfig string - }{ - { - buildOptions: types.ImageBuildOptions{ - SuppressOutput: true, - NoCache: true, - Remove: true, - ForceRemove: true, - PullParent: true, - }, - expectedQueryParams: map[string]string{ - "q": "1", - "nocache": "1", - "rm": "1", - "forcerm": "1", - "pull": "1", - }, - expectedTags: []string{}, - expectedRegistryConfig: emptyRegistryConfig, - }, - { - buildOptions: types.ImageBuildOptions{ - SuppressOutput: false, - NoCache: false, - Remove: false, - ForceRemove: false, - PullParent: false, - }, - expectedQueryParams: map[string]string{ - "q": "", - "nocache": "", - "rm": "0", - "forcerm": "", - "pull": "", - }, - expectedTags: []string{}, - expectedRegistryConfig: emptyRegistryConfig, - }, - { - buildOptions: types.ImageBuildOptions{ - RemoteContext: "remoteContext", - Isolation: container.Isolation("isolation"), - CPUSetCPUs: "2", - CPUSetMems: "12", - CPUShares: 20, - CPUQuota: 10, - CPUPeriod: 30, - Memory: 256, - MemorySwap: 512, - ShmSize: 10, - CgroupParent: "cgroup_parent", - Dockerfile: "Dockerfile", - }, - expectedQueryParams: map[string]string{ - "remote": "remoteContext", - "isolation": "isolation", - "cpusetcpus": "2", - "cpusetmems": "12", - "cpushares": "20", - "cpuquota": "10", - "cpuperiod": "30", - "memory": "256", - "memswap": "512", - "shmsize": "10", - "cgroupparent": "cgroup_parent", - "dockerfile": "Dockerfile", - "rm": "0", - }, - expectedTags: []string{}, - expectedRegistryConfig: emptyRegistryConfig, - }, - { - buildOptions: types.ImageBuildOptions{ - BuildArgs: map[string]*string{ - "ARG1": &v1, - "ARG2": &v2, - "ARG3": nil, - }, - }, - expectedQueryParams: map[string]string{ - "buildargs": `{"ARG1":"value1","ARG2":"value2","ARG3":null}`, - "rm": "0", - }, - expectedTags: []string{}, - expectedRegistryConfig: emptyRegistryConfig, - }, - { - buildOptions: types.ImageBuildOptions{ - Ulimits: []*units.Ulimit{ - { - Name: "nproc", - Hard: 65557, - Soft: 65557, - }, - { - Name: "nofile", - Hard: 20000, - Soft: 40000, - }, - }, - }, - expectedQueryParams: map[string]string{ - "ulimits": `[{"Name":"nproc","Hard":65557,"Soft":65557},{"Name":"nofile","Hard":20000,"Soft":40000}]`, - "rm": "0", - }, - expectedTags: []string{}, - expectedRegistryConfig: emptyRegistryConfig, - }, - { - buildOptions: types.ImageBuildOptions{ - AuthConfigs: map[string]types.AuthConfig{ - "https://index.docker.io/v1/": { - Auth: "dG90bwo=", - }, - }, - }, - expectedQueryParams: map[string]string{ - "rm": "0", - }, - expectedTags: []string{}, - expectedRegistryConfig: "eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsiYXV0aCI6ImRHOTBid289In19", - }, - } - for _, buildCase := range buildCases { - expectedURL := "/build" - client := &Client{ - client: newMockClient(func(r *http.Request) (*http.Response, error) { - if !strings.HasPrefix(r.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) - } - // Check request headers - registryConfig := r.Header.Get("X-Registry-Config") - if registryConfig != buildCase.expectedRegistryConfig { - return nil, fmt.Errorf("X-Registry-Config header not properly set in the request. 
Expected '%s', got %s", buildCase.expectedRegistryConfig, registryConfig) - } - contentType := r.Header.Get("Content-Type") - if contentType != "application/x-tar" { - return nil, fmt.Errorf("Content-type header not properly set in the request. Expected 'application/x-tar', got %s", contentType) - } - - // Check query parameters - query := r.URL.Query() - for key, expected := range buildCase.expectedQueryParams { - actual := query.Get(key) - if actual != expected { - return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) - } - } - - // Check tags - if len(buildCase.expectedTags) > 0 { - tags := query["t"] - if !reflect.DeepEqual(tags, buildCase.expectedTags) { - return nil, fmt.Errorf("t (tags) not set in URL query properly. Expected '%s', got %s", buildCase.expectedTags, tags) - } - } - - headers := http.Header{} - headers.Add("Server", "Docker/v1.23 (MyOS)") - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), - Header: headers, - }, nil - }), - } - buildResponse, err := client.ImageBuild(context.Background(), nil, buildCase.buildOptions) - if err != nil { - t.Fatal(err) - } - if buildResponse.OSType != "MyOS" { - t.Fatalf("expected OSType to be 'MyOS', got %s", buildResponse.OSType) - } - response, err := ioutil.ReadAll(buildResponse.Body) - if err != nil { - t.Fatal(err) - } - buildResponse.Body.Close() - if string(response) != "body" { - t.Fatalf("expected Body to contain 'body' string, got %s", response) - } - } -} - -func TestGetDockerOS(t *testing.T) { - cases := map[string]string{ - "Docker/v1.22 (linux)": "linux", - "Docker/v1.22 (windows)": "windows", - "Foo/v1.22 (bar)": "", - } - for header, os := range cases { - g := getDockerOS(header) - if g != os { - t.Fatalf("Expected %s, got %s", os, g) - } - } -} diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go index fe237508..23938047 100644 --- a/vendor/github.com/docker/docker/client/image_create.go +++ b/vendor/github.com/docker/docker/client/image_create.go @@ -1,12 +1,11 @@ package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" "strings" - "golang.org/x/net/context" - "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" ) diff --git a/vendor/github.com/docker/docker/client/image_create_test.go b/vendor/github.com/docker/docker/client/image_create_test.go deleted file mode 100644 index aa1d2734..00000000 --- a/vendor/github.com/docker/docker/client/image_create_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" -) - -func TestImageCreateError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ImageCreate(context.Background(), "reference", types.ImageCreateOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server error, got %v", err) - } -} - -func TestImageCreate(t *testing.T) { - expectedURL := "/images/create" - expectedImage := "test:5000/my_image" - expectedTag := "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" - expectedReference := fmt.Sprintf("%s@%s", expectedImage, expectedTag) - expectedRegistryAuth 
:= "eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsiYXV0aCI6ImRHOTBid289IiwiZW1haWwiOiJqb2huQGRvZS5jb20ifX0=" - client := &Client{ - client: newMockClient(func(r *http.Request) (*http.Response, error) { - if !strings.HasPrefix(r.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) - } - registryAuth := r.Header.Get("X-Registry-Auth") - if registryAuth != expectedRegistryAuth { - return nil, fmt.Errorf("X-Registry-Auth header not properly set in the request. Expected '%s', got %s", expectedRegistryAuth, registryAuth) - } - - query := r.URL.Query() - fromImage := query.Get("fromImage") - if fromImage != expectedImage { - return nil, fmt.Errorf("fromImage not set in URL query properly. Expected '%s', got %s", expectedImage, fromImage) - } - - tag := query.Get("tag") - if tag != expectedTag { - return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", expectedTag, tag) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), - }, nil - }), - } - - createResponse, err := client.ImageCreate(context.Background(), expectedReference, types.ImageCreateOptions{ - RegistryAuth: expectedRegistryAuth, - }) - if err != nil { - t.Fatal(err) - } - response, err := ioutil.ReadAll(createResponse) - if err != nil { - t.Fatal(err) - } - if err = createResponse.Close(); err != nil { - t.Fatal(err) - } - if string(response) != "body" { - t.Fatalf("expected Body to contain 'body' string, got %s", response) - } -} diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go index cf74723a..0151b951 100644 --- a/vendor/github.com/docker/docker/client/image_history.go +++ b/vendor/github.com/docker/docker/client/image_history.go @@ -1,11 +1,11 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types/image" - "golang.org/x/net/context" ) // ImageHistory returns the changes in an image in history format. 
diff --git a/vendor/github.com/docker/docker/client/image_history_test.go b/vendor/github.com/docker/docker/client/image_history_test.go deleted file mode 100644 index 6b2eca21..00000000 --- a/vendor/github.com/docker/docker/client/image_history_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types/image" - "golang.org/x/net/context" -) - -func TestImageHistoryError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ImageHistory(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server error, got %v", err) - } -} - -func TestImageHistory(t *testing.T) { - expectedURL := "/images/image_id/history" - client := &Client{ - client: newMockClient(func(r *http.Request) (*http.Response, error) { - if !strings.HasPrefix(r.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) - } - b, err := json.Marshal([]image.HistoryResponseItem{ - { - ID: "image_id1", - Tags: []string{"tag1", "tag2"}, - }, - { - ID: "image_id2", - Tags: []string{"tag1", "tag2"}, - }, - }) - if err != nil { - return nil, err - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - imageHistories, err := client.ImageHistory(context.Background(), "image_id") - if err != nil { - t.Fatal(err) - } - if len(imageHistories) != 2 { - t.Fatalf("expected 2 containers, got %v", imageHistories) - } -} diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go index dddbf9c6..c2972ea9 100644 --- a/vendor/github.com/docker/docker/client/image_import.go +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -1,12 +1,11 @@ package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" "strings" - "golang.org/x/net/context" - "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" ) diff --git a/vendor/github.com/docker/docker/client/image_import_test.go b/vendor/github.com/docker/docker/client/image_import_test.go deleted file mode 100644 index 5f304c1a..00000000 --- a/vendor/github.com/docker/docker/client/image_import_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "reflect" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -func TestImageImportError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ImageImport(context.Background(), types.ImageImportSource{}, "image:tag", types.ImageImportOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server error, got %v", err) - } -} - -func TestImageImport(t *testing.T) { - expectedURL := "/images/create" - client := &Client{ - client: newMockClient(func(r *http.Request) (*http.Response, error) { - if !strings.HasPrefix(r.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) - } - query := r.URL.Query() - fromSrc := query.Get("fromSrc") - if fromSrc 
!= "image_source" { - return nil, fmt.Errorf("fromSrc not set in URL query properly. Expected 'image_source', got %s", fromSrc) - } - repo := query.Get("repo") - if repo != "repository_name:imported" { - return nil, fmt.Errorf("repo not set in URL query properly. Expected 'repository_name:imported', got %s", repo) - } - tag := query.Get("tag") - if tag != "imported" { - return nil, fmt.Errorf("tag not set in URL query properly. Expected 'imported', got %s", tag) - } - message := query.Get("message") - if message != "A message" { - return nil, fmt.Errorf("message not set in URL query properly. Expected 'A message', got %s", message) - } - changes := query["changes"] - expectedChanges := []string{"change1", "change2"} - if !reflect.DeepEqual(expectedChanges, changes) { - return nil, fmt.Errorf("changes not set in URL query properly. Expected %v, got %v", expectedChanges, changes) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), - }, nil - }), - } - importResponse, err := client.ImageImport(context.Background(), types.ImageImportSource{ - Source: strings.NewReader("source"), - SourceName: "image_source", - }, "repository_name:imported", types.ImageImportOptions{ - Tag: "imported", - Message: "A message", - Changes: []string{"change1", "change2"}, - }) - if err != nil { - t.Fatal(err) - } - response, err := ioutil.ReadAll(importResponse) - if err != nil { - t.Fatal(err) - } - importResponse.Close() - if string(response) != "response" { - t.Fatalf("expected response to contain 'response', got %s", string(response)) - } -} diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go index 15811332..2f8f6d2f 100644 --- a/vendor/github.com/docker/docker/client/image_inspect.go +++ b/vendor/github.com/docker/docker/client/image_inspect.go @@ -2,11 +2,11 @@ package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ImageInspectWithRaw returns the image information and its raw representation. 
diff --git a/vendor/github.com/docker/docker/client/image_inspect_test.go b/vendor/github.com/docker/docker/client/image_inspect_test.go deleted file mode 100644 index e42d251f..00000000 --- a/vendor/github.com/docker/docker/client/image_inspect_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "reflect" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -func TestImageInspectError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, _, err := client.ImageInspectWithRaw(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestImageInspectImageNotFound(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "Server error")), - } - - _, _, err := client.ImageInspectWithRaw(context.Background(), "unknown") - if err == nil || !IsErrNotFound(err) { - t.Fatalf("expected an imageNotFound error, got %v", err) - } -} - -func TestImageInspectWithEmptyID(t *testing.T) { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - return nil, errors.New("should not make request") - }), - } - _, _, err := client.ImageInspectWithRaw(context.Background(), "") - if !IsErrNotFound(err) { - t.Fatalf("Expected NotFoundError, got %v", err) - } -} - -func TestImageInspect(t *testing.T) { - expectedURL := "/images/image_id/json" - expectedTags := []string{"tag1", "tag2"} - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - content, err := json.Marshal(types.ImageInspect{ - ID: "image_id", - RepoTags: expectedTags, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - imageInspect, _, err := client.ImageInspectWithRaw(context.Background(), "image_id") - if err != nil { - t.Fatal(err) - } - if imageInspect.ID != "image_id" { - t.Fatalf("expected `image_id`, got %s", imageInspect.ID) - } - if !reflect.DeepEqual(imageInspect.RepoTags, expectedTags) { - t.Fatalf("expected `%v`, got %v", expectedTags, imageInspect.RepoTags) - } -} diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index 1e84a962..32fae27b 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -1,13 +1,13 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/versions" - "golang.org/x/net/context" ) // ImageList returns a list of images in the docker host. 
diff --git a/vendor/github.com/docker/docker/client/image_list_test.go b/vendor/github.com/docker/docker/client/image_list_test.go deleted file mode 100644 index 48e10fa1..00000000 --- a/vendor/github.com/docker/docker/client/image_list_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" -) - -func TestImageListError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.ImageList(context.Background(), types.ImageListOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestImageList(t *testing.T) { - expectedURL := "/images/json" - - noDanglingfilters := filters.NewArgs() - noDanglingfilters.Add("dangling", "false") - - filters := filters.NewArgs() - filters.Add("label", "label1") - filters.Add("label", "label2") - filters.Add("dangling", "true") - - listCases := []struct { - options types.ImageListOptions - expectedQueryParams map[string]string - }{ - { - options: types.ImageListOptions{}, - expectedQueryParams: map[string]string{ - "all": "", - "filter": "", - "filters": "", - }, - }, - { - options: types.ImageListOptions{ - Filters: filters, - }, - expectedQueryParams: map[string]string{ - "all": "", - "filter": "", - "filters": `{"dangling":{"true":true},"label":{"label1":true,"label2":true}}`, - }, - }, - { - options: types.ImageListOptions{ - Filters: noDanglingfilters, - }, - expectedQueryParams: map[string]string{ - "all": "", - "filter": "", - "filters": `{"dangling":{"false":true}}`, - }, - }, - } - for _, listCase := range listCases { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - for key, expected := range listCase.expectedQueryParams { - actual := query.Get(key) - if actual != expected { - return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) - } - } - content, err := json.Marshal([]types.ImageSummary{ - { - ID: "image_id2", - }, - { - ID: "image_id2", - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - images, err := client.ImageList(context.Background(), listCase.options) - if err != nil { - t.Fatal(err) - } - if len(images) != 2 { - t.Fatalf("expected 2 images, got %v", images) - } - } -} - -func TestImageListApiBefore125(t *testing.T) { - expectedFilter := "image:tag" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - query := req.URL.Query() - actualFilter := query.Get("filter") - if actualFilter != expectedFilter { - return nil, fmt.Errorf("filter not set in URL query properly. 
Expected '%s', got %s", expectedFilter, actualFilter) - } - actualFilters := query.Get("filters") - if actualFilters != "" { - return nil, fmt.Errorf("filters should have not been present, were with value: %s", actualFilters) - } - content, err := json.Marshal([]types.ImageSummary{ - { - ID: "image_id2", - }, - { - ID: "image_id2", - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - version: "1.24", - } - - filters := filters.NewArgs() - filters.Add("reference", "image:tag") - - options := types.ImageListOptions{ - Filters: filters, - } - - images, err := client.ImageList(context.Background(), options) - if err != nil { - t.Fatal(err) - } - if len(images) != 2 { - t.Fatalf("expected 2 images, got %v", images) - } -} diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go index 411567c0..91016e49 100644 --- a/vendor/github.com/docker/docker/client/image_load.go +++ b/vendor/github.com/docker/docker/client/image_load.go @@ -1,11 +1,10 @@ package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" ) diff --git a/vendor/github.com/docker/docker/client/image_load_test.go b/vendor/github.com/docker/docker/client/image_load_test.go deleted file mode 100644 index 63809fb0..00000000 --- a/vendor/github.com/docker/docker/client/image_load_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestImageLoadError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.ImageLoad(context.Background(), nil, true) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestImageLoad(t *testing.T) { - expectedURL := "/images/load" - expectedInput := "inputBody" - expectedOutput := "outputBody" - loadCases := []struct { - quiet bool - responseContentType string - expectedResponseJSON bool - expectedQueryParams map[string]string - }{ - { - quiet: false, - responseContentType: "text/plain", - expectedResponseJSON: false, - expectedQueryParams: map[string]string{ - "quiet": "0", - }, - }, - { - quiet: true, - responseContentType: "application/json", - expectedResponseJSON: true, - expectedQueryParams: map[string]string{ - "quiet": "1", - }, - }, - } - for _, loadCase := range loadCases { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - contentType := req.Header.Get("Content-Type") - if contentType != "application/x-tar" { - return nil, fmt.Errorf("content-type not set in URL headers properly. Expected 'application/x-tar', got %s", contentType) - } - query := req.URL.Query() - for key, expected := range loadCase.expectedQueryParams { - actual := query.Get(key) - if actual != expected { - return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) - } - } - headers := http.Header{} - headers.Add("Content-Type", loadCase.responseContentType) - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), - Header: headers, - }, nil - }), - } - - input := bytes.NewReader([]byte(expectedInput)) - imageLoadResponse, err := client.ImageLoad(context.Background(), input, loadCase.quiet) - if err != nil { - t.Fatal(err) - } - if imageLoadResponse.JSON != loadCase.expectedResponseJSON { - t.Fatalf("expected a JSON response, was not.") - } - body, err := ioutil.ReadAll(imageLoadResponse.Body) - if err != nil { - t.Fatal(err) - } - if string(body) != expectedOutput { - t.Fatalf("expected %s, got %s", expectedOutput, string(body)) - } - } -} diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go index 32ea9098..78ee3f6c 100644 --- a/vendor/github.com/docker/docker/client/image_prune.go +++ b/vendor/github.com/docker/docker/client/image_prune.go @@ -1,12 +1,12 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // ImagesPrune requests the daemon to delete unused data diff --git a/vendor/github.com/docker/docker/client/image_prune_test.go b/vendor/github.com/docker/docker/client/image_prune_test.go deleted file mode 100644 index 9b161531..00000000 --- a/vendor/github.com/docker/docker/client/image_prune_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "golang.org/x/net/context" -) - -func TestImagesPruneError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - version: "1.25", - } - - filters := filters.NewArgs() - - _, err := client.ImagesPrune(context.Background(), filters) - assert.Check(t, is.Error(err, "Error response from daemon: Server error")) -} - -func TestImagesPrune(t *testing.T) { - expectedURL := "/v1.25/images/prune" - - danglingFilters := filters.NewArgs() - danglingFilters.Add("dangling", "true") - - noDanglingFilters := filters.NewArgs() - noDanglingFilters.Add("dangling", "false") - - labelFilters := filters.NewArgs() - labelFilters.Add("dangling", "true") - labelFilters.Add("label", "label1=foo") - labelFilters.Add("label", "label2!=bar") - - listCases := []struct { - filters filters.Args - expectedQueryParams map[string]string - }{ - { - filters: filters.Args{}, - expectedQueryParams: map[string]string{ - "until": "", - "filter": "", - "filters": "", - }, - }, - { - filters: danglingFilters, - expectedQueryParams: map[string]string{ - "until": "", - "filter": "", - "filters": `{"dangling":{"true":true}}`, - }, - }, - { - filters: noDanglingFilters, - expectedQueryParams: map[string]string{ - "until": "", - "filter": "", - "filters": `{"dangling":{"false":true}}`, - }, - }, - { - filters: labelFilters, - expectedQueryParams: map[string]string{ - "until": "", - "filter": "", - "filters": `{"dangling":{"true":true},"label":{"label1=foo":true,"label2!=bar":true}}`, - }, - }, - } - for 
_, listCase := range listCases { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - for key, expected := range listCase.expectedQueryParams { - actual := query.Get(key) - assert.Check(t, is.Equal(expected, actual)) - } - content, err := json.Marshal(types.ImagesPruneReport{ - ImagesDeleted: []types.ImageDeleteResponseItem{ - { - Deleted: "image_id1", - }, - { - Deleted: "image_id2", - }, - }, - SpaceReclaimed: 9999, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - version: "1.25", - } - - report, err := client.ImagesPrune(context.Background(), listCase.filters) - assert.Check(t, err) - assert.Check(t, is.Len(report.ImagesDeleted, 2)) - assert.Check(t, is.Equal(uint64(9999), report.SpaceReclaimed)) - } -} diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go index ee5923a8..d97aacf8 100644 --- a/vendor/github.com/docker/docker/client/image_pull.go +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ -1,13 +1,12 @@ package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/http" "net/url" "strings" - "golang.org/x/net/context" - "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" ) diff --git a/vendor/github.com/docker/docker/client/image_pull_test.go b/vendor/github.com/docker/docker/client/image_pull_test.go deleted file mode 100644 index f23c131a..00000000 --- a/vendor/github.com/docker/docker/client/image_pull_test.go +++ /dev/null @@ -1,199 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" -) - -func TestImagePullReferenceParseError(t *testing.T) { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - return nil, nil - }), - } - // An empty reference is an invalid reference - _, err := client.ImagePull(context.Background(), "", types.ImagePullOptions{}) - if err == nil || !strings.Contains(err.Error(), "invalid reference format") { - t.Fatalf("expected an error, got %v", err) - } -} - -func TestImagePullAnyError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestImagePullStatusUnauthorizedError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), - } - _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{}) - if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { - t.Fatalf("expected an Unauthorized Error, got %v", err) - } -} - -func TestImagePullWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), - } - privilegeFunc := func() (string, error) { - return "", fmt.Errorf("Error requesting 
privilege") - } - _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ - PrivilegeFunc: privilegeFunc, - }) - if err == nil || err.Error() != "Error requesting privilege" { - t.Fatalf("expected an error requesting privilege, got %v", err) - } -} - -func TestImagePullWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), - } - privilegeFunc := func() (string, error) { - return "a-auth-header", nil - } - _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ - PrivilegeFunc: privilegeFunc, - }) - if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { - t.Fatalf("expected an Unauthorized Error, got %v", err) - } -} - -func TestImagePullWithPrivilegedFuncNoError(t *testing.T) { - expectedURL := "/images/create" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - auth := req.Header.Get("X-Registry-Auth") - if auth == "NotValid" { - return &http.Response{ - StatusCode: http.StatusUnauthorized, - Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), - }, nil - } - if auth != "IAmValid" { - return nil, fmt.Errorf("Invalid auth header : expected %s, got %s", "IAmValid", auth) - } - query := req.URL.Query() - fromImage := query.Get("fromImage") - if fromImage != "myimage" { - return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", "myimage", fromImage) - } - tag := query.Get("tag") - if tag != "latest" { - return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", "latest", tag) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), - }, nil - }), - } - privilegeFunc := func() (string, error) { - return "IAmValid", nil - } - resp, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ - RegistryAuth: "NotValid", - PrivilegeFunc: privilegeFunc, - }) - if err != nil { - t.Fatal(err) - } - body, err := ioutil.ReadAll(resp) - if err != nil { - t.Fatal(err) - } - if string(body) != "hello world" { - t.Fatalf("expected 'hello world', got %s", string(body)) - } -} - -func TestImagePullWithoutErrors(t *testing.T) { - expectedURL := "/images/create" - expectedOutput := "hello world" - pullCases := []struct { - all bool - reference string - expectedImage string - expectedTag string - }{ - { - all: false, - reference: "myimage", - expectedImage: "myimage", - expectedTag: "latest", - }, - { - all: false, - reference: "myimage:tag", - expectedImage: "myimage", - expectedTag: "tag", - }, - { - all: true, - reference: "myimage", - expectedImage: "myimage", - expectedTag: "", - }, - { - all: true, - reference: "myimage:anything", - expectedImage: "myimage", - expectedTag: "", - }, - } - for _, pullCase := range pullCases { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - fromImage := query.Get("fromImage") - if fromImage != pullCase.expectedImage { - return nil, fmt.Errorf("fromimage not set in URL query properly. 
Expected '%s', got %s", pullCase.expectedImage, fromImage) - } - tag := query.Get("tag") - if tag != pullCase.expectedTag { - return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", pullCase.expectedTag, tag) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), - }, nil - }), - } - resp, err := client.ImagePull(context.Background(), pullCase.reference, types.ImagePullOptions{ - All: pullCase.all, - }) - if err != nil { - t.Fatal(err) - } - body, err := ioutil.ReadAll(resp) - if err != nil { - t.Fatal(err) - } - if string(body) != expectedOutput { - t.Fatalf("expected '%s', got %s", expectedOutput, string(body)) - } - } -} diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go index 6fcc1626..a15871c2 100644 --- a/vendor/github.com/docker/docker/client/image_push.go +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -1,13 +1,12 @@ package client // import "github.com/docker/docker/client" import ( + "context" "errors" "io" "net/http" "net/url" - "golang.org/x/net/context" - "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" ) diff --git a/vendor/github.com/docker/docker/client/image_push_test.go b/vendor/github.com/docker/docker/client/image_push_test.go deleted file mode 100644 index b006d4f3..00000000 --- a/vendor/github.com/docker/docker/client/image_push_test.go +++ /dev/null @@ -1,180 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" -) - -func TestImagePushReferenceError(t *testing.T) { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - return nil, nil - }), - } - // An empty reference is an invalid reference - _, err := client.ImagePush(context.Background(), "", types.ImagePushOptions{}) - if err == nil || !strings.Contains(err.Error(), "invalid reference format") { - t.Fatalf("expected an error, got %v", err) - } - // An canonical reference cannot be pushed - _, err = client.ImagePush(context.Background(), "repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", types.ImagePushOptions{}) - if err == nil || err.Error() != "cannot push a digest reference" { - t.Fatalf("expected an error, got %v", err) - } -} - -func TestImagePushAnyError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestImagePushStatusUnauthorizedError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), - } - _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{}) - if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { - t.Fatalf("expected an Unauthorized Error, got %v", err) - } -} - -func TestImagePushWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), - } - privilegeFunc := func() (string, error) { - return "", fmt.Errorf("Error requesting 
privilege") - } - _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{ - PrivilegeFunc: privilegeFunc, - }) - if err == nil || err.Error() != "Error requesting privilege" { - t.Fatalf("expected an error requesting privilege, got %v", err) - } -} - -func TestImagePushWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), - } - privilegeFunc := func() (string, error) { - return "a-auth-header", nil - } - _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{ - PrivilegeFunc: privilegeFunc, - }) - if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { - t.Fatalf("expected an Unauthorized Error, got %v", err) - } -} - -func TestImagePushWithPrivilegedFuncNoError(t *testing.T) { - expectedURL := "/images/myimage/push" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - auth := req.Header.Get("X-Registry-Auth") - if auth == "NotValid" { - return &http.Response{ - StatusCode: http.StatusUnauthorized, - Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), - }, nil - } - if auth != "IAmValid" { - return nil, fmt.Errorf("Invalid auth header : expected %s, got %s", "IAmValid", auth) - } - query := req.URL.Query() - tag := query.Get("tag") - if tag != "tag" { - return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", "tag", tag) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), - }, nil - }), - } - privilegeFunc := func() (string, error) { - return "IAmValid", nil - } - resp, err := client.ImagePush(context.Background(), "myimage:tag", types.ImagePushOptions{ - RegistryAuth: "NotValid", - PrivilegeFunc: privilegeFunc, - }) - if err != nil { - t.Fatal(err) - } - body, err := ioutil.ReadAll(resp) - if err != nil { - t.Fatal(err) - } - if string(body) != "hello world" { - t.Fatalf("expected 'hello world', got %s", string(body)) - } -} - -func TestImagePushWithoutErrors(t *testing.T) { - expectedOutput := "hello world" - expectedURLFormat := "/images/%s/push" - pullCases := []struct { - reference string - expectedImage string - expectedTag string - }{ - { - reference: "myimage", - expectedImage: "myimage", - expectedTag: "", - }, - { - reference: "myimage:tag", - expectedImage: "myimage", - expectedTag: "tag", - }, - } - for _, pullCase := range pullCases { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - expectedURL := fmt.Sprintf(expectedURLFormat, pullCase.expectedImage) - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - tag := query.Get("tag") - if tag != pullCase.expectedTag { - return nil, fmt.Errorf("tag not set in URL query properly. 
Expected '%s', got %s", pullCase.expectedTag, tag) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), - }, nil - }), - } - resp, err := client.ImagePush(context.Background(), pullCase.reference, types.ImagePushOptions{}) - if err != nil { - t.Fatal(err) - } - body, err := ioutil.ReadAll(resp) - if err != nil { - t.Fatal(err) - } - if string(body) != expectedOutput { - t.Fatalf("expected '%s', got %s", expectedOutput, string(body)) - } - } -} diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go index 6d222a50..45d6e6f0 100644 --- a/vendor/github.com/docker/docker/client/image_remove.go +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -1,11 +1,11 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ImageRemove removes an image from the docker host. diff --git a/vendor/github.com/docker/docker/client/image_remove_test.go b/vendor/github.com/docker/docker/client/image_remove_test.go deleted file mode 100644 index a1686e64..00000000 --- a/vendor/github.com/docker/docker/client/image_remove_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "golang.org/x/net/context" -) - -func TestImageRemoveError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{}) - assert.Check(t, is.Error(err, "Error response from daemon: Server error")) -} - -func TestImageRemoveImageNotFound(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "missing")), - } - - _, err := client.ImageRemove(context.Background(), "unknown", types.ImageRemoveOptions{}) - assert.Check(t, is.Error(err, "Error: No such image: unknown")) - assert.Check(t, IsErrNotFound(err)) -} - -func TestImageRemove(t *testing.T) { - expectedURL := "/images/image_id" - removeCases := []struct { - force bool - pruneChildren bool - expectedQueryParams map[string]string - }{ - { - force: false, - pruneChildren: false, - expectedQueryParams: map[string]string{ - "force": "", - "noprune": "1", - }, - }, { - force: true, - pruneChildren: true, - expectedQueryParams: map[string]string{ - "force": "1", - "noprune": "", - }, - }, - } - for _, removeCase := range removeCases { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "DELETE" { - return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) - } - query := req.URL.Query() - for key, expected := range removeCase.expectedQueryParams { - actual := query.Get(key) - if actual != expected { - return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) - } - } - b, err := json.Marshal([]types.ImageDeleteResponseItem{ - { - Untagged: "image_id1", - }, - { - Deleted: "image_id", - }, - }) - if err != nil { - return nil, err - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - imageDeletes, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{ - Force: removeCase.force, - PruneChildren: removeCase.pruneChildren, - }) - if err != nil { - t.Fatal(err) - } - if len(imageDeletes) != 2 { - t.Fatalf("expected 2 deleted images, got %v", imageDeletes) - } - } -} diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go index a9196159..d1314e4b 100644 --- a/vendor/github.com/docker/docker/client/image_save.go +++ b/vendor/github.com/docker/docker/client/image_save.go @@ -1,10 +1,9 @@ package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" - - "golang.org/x/net/context" ) // ImageSave retrieves one or more images from the docker host as an io.ReadCloser. diff --git a/vendor/github.com/docker/docker/client/image_save_test.go b/vendor/github.com/docker/docker/client/image_save_test.go deleted file mode 100644 index bf9d643c..00000000 --- a/vendor/github.com/docker/docker/client/image_save_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "reflect" - "testing" - - "golang.org/x/net/context" - - "strings" -) - -func TestImageSaveError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ImageSave(context.Background(), []string{"nothing"}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server error, got %v", err) - } -} - -func TestImageSave(t *testing.T) { - expectedURL := "/images/get" - client := &Client{ - client: newMockClient(func(r *http.Request) (*http.Response, error) { - if !strings.HasPrefix(r.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) - } - query := r.URL.Query() - names := query["names"] - expectedNames := []string{"image_id1", "image_id2"} - if !reflect.DeepEqual(names, expectedNames) { - return nil, fmt.Errorf("names not set in URL query properly. 
Expected %v, got %v", names, expectedNames) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), - }, nil - }), - } - saveResponse, err := client.ImageSave(context.Background(), []string{"image_id1", "image_id2"}) - if err != nil { - t.Fatal(err) - } - response, err := ioutil.ReadAll(saveResponse) - if err != nil { - t.Fatal(err) - } - saveResponse.Close() - if string(response) != "response" { - t.Fatalf("expected response to contain 'response', got %s", string(response)) - } -} diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go index ba5072ad..176de3c5 100644 --- a/vendor/github.com/docker/docker/client/image_search.go +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -1,6 +1,7 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "net/http" @@ -9,7 +10,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" - "golang.org/x/net/context" ) // ImageSearch makes the docker host to search by a term in a remote registry. diff --git a/vendor/github.com/docker/docker/client/image_search_test.go b/vendor/github.com/docker/docker/client/image_search_test.go deleted file mode 100644 index d4f9fff1..00000000 --- a/vendor/github.com/docker/docker/client/image_search_test.go +++ /dev/null @@ -1,164 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/registry" - "golang.org/x/net/context" -) - -func TestImageSearchAnyError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestImageSearchStatusUnauthorizedError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), - } - _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{}) - if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { - t.Fatalf("expected an Unauthorized Error, got %v", err) - } -} - -func TestImageSearchWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), - } - privilegeFunc := func() (string, error) { - return "", fmt.Errorf("Error requesting privilege") - } - _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ - PrivilegeFunc: privilegeFunc, - }) - if err == nil || err.Error() != "Error requesting privilege" { - t.Fatalf("expected an error requesting privilege, got %v", err) - } -} - -func TestImageSearchWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), - } - privilegeFunc := func() (string, error) { - return "a-auth-header", nil - } - _, err := client.ImageSearch(context.Background(), "some-image", 
types.ImageSearchOptions{ - PrivilegeFunc: privilegeFunc, - }) - if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { - t.Fatalf("expected an Unauthorized Error, got %v", err) - } -} - -func TestImageSearchWithPrivilegedFuncNoError(t *testing.T) { - expectedURL := "/images/search" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - auth := req.Header.Get("X-Registry-Auth") - if auth == "NotValid" { - return &http.Response{ - StatusCode: http.StatusUnauthorized, - Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), - }, nil - } - if auth != "IAmValid" { - return nil, fmt.Errorf("Invalid auth header : expected 'IAmValid', got %s", auth) - } - query := req.URL.Query() - term := query.Get("term") - if term != "some-image" { - return nil, fmt.Errorf("term not set in URL query properly. Expected 'some-image', got %s", term) - } - content, err := json.Marshal([]registry.SearchResult{ - { - Name: "anything", - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - privilegeFunc := func() (string, error) { - return "IAmValid", nil - } - results, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ - RegistryAuth: "NotValid", - PrivilegeFunc: privilegeFunc, - }) - if err != nil { - t.Fatal(err) - } - if len(results) != 1 { - t.Fatalf("expected 1 result, got %v", results) - } -} - -func TestImageSearchWithoutErrors(t *testing.T) { - expectedURL := "/images/search" - filterArgs := filters.NewArgs() - filterArgs.Add("is-automated", "true") - filterArgs.Add("stars", "3") - - expectedFilters := `{"is-automated":{"true":true},"stars":{"3":true}}` - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - term := query.Get("term") - if term != "some-image" { - return nil, fmt.Errorf("term not set in URL query properly. Expected 'some-image', got %s", term) - } - filters := query.Get("filters") - if filters != expectedFilters { - return nil, fmt.Errorf("filters not set in URL query properly. 
Expected '%s', got %s", expectedFilters, filters) - } - content, err := json.Marshal([]registry.SearchResult{ - { - Name: "anything", - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - results, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ - Filters: filterArgs, - }) - if err != nil { - t.Fatal(err) - } - if len(results) != 1 { - t.Fatalf("expected a result, got %v", results) - } -} diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go index 4399a3e9..5652bfc2 100644 --- a/vendor/github.com/docker/docker/client/image_tag.go +++ b/vendor/github.com/docker/docker/client/image_tag.go @@ -1,11 +1,11 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/distribution/reference" "github.com/pkg/errors" - "golang.org/x/net/context" ) // ImageTag tags an image in the docker host diff --git a/vendor/github.com/docker/docker/client/image_tag_test.go b/vendor/github.com/docker/docker/client/image_tag_test.go deleted file mode 100644 index b8660c92..00000000 --- a/vendor/github.com/docker/docker/client/image_tag_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestImageTagError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.ImageTag(context.Background(), "image_id", "repo:tag") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -// Note: this is not testing all the InvalidReference as it's the responsibility -// of distribution/reference package. 
-func TestImageTagInvalidReference(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.ImageTag(context.Background(), "image_id", "aa/asdf$$^/aa") - if err == nil || err.Error() != `Error parsing reference: "aa/asdf$$^/aa" is not a valid repository/tag: invalid reference format` { - t.Fatalf("expected ErrReferenceInvalidFormat, got %v", err) - } -} - -func TestImageTagInvalidSourceImageName(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.ImageTag(context.Background(), "invalid_source_image_name_", "repo:tag") - if err == nil || err.Error() != "Error parsing reference: \"invalid_source_image_name_\" is not a valid repository/tag: invalid reference format" { - t.Fatalf("expected Parsing Reference Error, got %v", err) - } -} - -func TestImageTagHexSource(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusOK, "OK")), - } - - err := client.ImageTag(context.Background(), "0d409d33b27e47423b049f7f863faa08655a8c901749c2b25b93ca67d01a470d", "repo:tag") - if err != nil { - t.Fatalf("got error: %v", err) - } -} - -func TestImageTag(t *testing.T) { - expectedURL := "/images/image_id/tag" - tagCases := []struct { - reference string - expectedQueryParams map[string]string - }{ - { - reference: "repository:tag1", - expectedQueryParams: map[string]string{ - "repo": "repository", - "tag": "tag1", - }, - }, { - reference: "another_repository:latest", - expectedQueryParams: map[string]string{ - "repo": "another_repository", - "tag": "latest", - }, - }, { - reference: "another_repository", - expectedQueryParams: map[string]string{ - "repo": "another_repository", - "tag": "latest", - }, - }, { - reference: "test/another_repository", - expectedQueryParams: map[string]string{ - "repo": "test/another_repository", - "tag": "latest", - }, - }, { - reference: "test/another_repository:tag1", - expectedQueryParams: map[string]string{ - "repo": "test/another_repository", - "tag": "tag1", - }, - }, { - reference: "test/test/another_repository:tag1", - expectedQueryParams: map[string]string{ - "repo": "test/test/another_repository", - "tag": "tag1", - }, - }, { - reference: "test:5000/test/another_repository:tag1", - expectedQueryParams: map[string]string{ - "repo": "test:5000/test/another_repository", - "tag": "tag1", - }, - }, { - reference: "test:5000/test/another_repository", - expectedQueryParams: map[string]string{ - "repo": "test:5000/test/another_repository", - "tag": "latest", - }, - }, - } - for _, tagCase := range tagCases { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - query := req.URL.Query() - for key, expected := range tagCase.expectedQueryParams { - actual := query.Get(key) - if actual != expected { - return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) - } - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - err := client.ImageTag(context.Background(), "image_id", tagCase.reference) - if err != nil { - t.Fatal(err) - } - } -} diff --git a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go index 3f6746c9..121f256a 100644 --- a/vendor/github.com/docker/docker/client/info.go +++ b/vendor/github.com/docker/docker/client/info.go @@ -1,12 +1,12 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // Info returns information about the docker server. diff --git a/vendor/github.com/docker/docker/client/info_test.go b/vendor/github.com/docker/docker/client/info_test.go deleted file mode 100644 index de06ef7a..00000000 --- a/vendor/github.com/docker/docker/client/info_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -func TestInfoServerError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.Info(context.Background()) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestInfoInvalidResponseJSONError(t *testing.T) { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("invalid json"))), - }, nil - }), - } - _, err := client.Info(context.Background()) - if err == nil || !strings.Contains(err.Error(), "invalid character") { - t.Fatalf("expected a 'invalid character' error, got %v", err) - } -} - -func TestInfo(t *testing.T) { - expectedURL := "/info" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - info := &types.Info{ - ID: "daemonID", - Containers: 3, - } - b, err := json.Marshal(info) - if err != nil { - return nil, err - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - info, err := client.Info(context.Background()) - if err != nil { - t.Fatal(err) - } - - if info.ID != "daemonID" { - t.Fatalf("expected daemonID, got %s", info.ID) - } - - if info.Containers != 3 { - t.Fatalf("expected 3 containers, got %d", info.Containers) - } -} diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index 8517546a..d190f8e5 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -1,8 +1,10 @@ package client // import "github.com/docker/docker/client" import ( + "context" "io" "net" + "net/http" "time" "github.com/docker/docker/api/types" @@ -14,7 +16,6 @@ import ( "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" volumetypes "github.com/docker/docker/api/types/volume" - 
"golang.org/x/net/context" ) // CommonAPIClient is the common methods between stable and experimental versions of APIClient. @@ -33,10 +34,12 @@ type CommonAPIClient interface { VolumeAPIClient ClientVersion() string DaemonHost() string + HTTPClient() *http.Client ServerVersion(ctx context.Context) (types.Version, error) NegotiateAPIVersion(ctx context.Context) NegotiateAPIVersionPing(types.Ping) DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) + Dialer() func(context.Context) (net.Conn, error) Close() error } @@ -83,7 +86,8 @@ type DistributionAPIClient interface { // ImageAPIClient defines API client methods for the images type ImageAPIClient interface { ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) - BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) + BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) + BuildCancel(ctx context.Context, id string) error ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) @@ -168,10 +172,10 @@ type SystemAPIClient interface { // VolumeAPIClient defines API client methods for the volumes type VolumeAPIClient interface { - VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) + VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) - VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) + VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) VolumeRemove(ctx context.Context, volumeID string, force bool) error VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) } diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go index 236c1d6c..402ffb51 100644 --- a/vendor/github.com/docker/docker/client/interface_experimental.go +++ b/vendor/github.com/docker/docker/client/interface_experimental.go @@ -1,8 +1,9 @@ package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) type apiClientExperimental interface { diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go index fefb101a..7d661819 100644 --- a/vendor/github.com/docker/docker/client/login.go +++ b/vendor/github.com/docker/docker/client/login.go @@ -1,13 +1,13 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/http" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" - "golang.org/x/net/context" ) // RegistryLogin authenticates the docker server with a given docker registry. 
diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go index af4a6165..57189461 100644 --- a/vendor/github.com/docker/docker/client/network_connect.go +++ b/vendor/github.com/docker/docker/client/network_connect.go @@ -1,9 +1,10 @@ package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/network" - "golang.org/x/net/context" ) // NetworkConnect connects a container to an existent network in the docker host. diff --git a/vendor/github.com/docker/docker/client/network_connect_test.go b/vendor/github.com/docker/docker/client/network_connect_test.go deleted file mode 100644 index 28b1c006..00000000 --- a/vendor/github.com/docker/docker/client/network_connect_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/network" -) - -func TestNetworkConnectError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.NetworkConnect(context.Background(), "network_id", "container_id", nil) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestNetworkConnectEmptyNilEndpointSettings(t *testing.T) { - expectedURL := "/networks/network_id/connect" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - - var connect types.NetworkConnect - if err := json.NewDecoder(req.Body).Decode(&connect); err != nil { - return nil, err - } - - if connect.Container != "container_id" { - return nil, fmt.Errorf("expected 'container_id', got %s", connect.Container) - } - - if connect.EndpointConfig != nil { - return nil, fmt.Errorf("expected connect.EndpointConfig to be nil, got %v", connect.EndpointConfig) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.NetworkConnect(context.Background(), "network_id", "container_id", nil) - if err != nil { - t.Fatal(err) - } -} - -func TestNetworkConnect(t *testing.T) { - expectedURL := "/networks/network_id/connect" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - - var connect types.NetworkConnect - if err := json.NewDecoder(req.Body).Decode(&connect); err != nil { - return nil, err - } - - if connect.Container != "container_id" { - return nil, fmt.Errorf("expected 'container_id', got %s", connect.Container) - } - - if connect.EndpointConfig == nil { - return nil, fmt.Errorf("expected connect.EndpointConfig to be not nil, got %v", connect.EndpointConfig) - } - - if connect.EndpointConfig.NetworkID != "NetworkID" { - return nil, 
fmt.Errorf("expected 'NetworkID', got %s", connect.EndpointConfig.NetworkID) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.NetworkConnect(context.Background(), "network_id", "container_id", &network.EndpointSettings{ - NetworkID: "NetworkID", - }) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go index fa1d301a..41da2ac6 100644 --- a/vendor/github.com/docker/docker/client/network_create.go +++ b/vendor/github.com/docker/docker/client/network_create.go @@ -1,10 +1,10 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // NetworkCreate creates a new network in the docker host. diff --git a/vendor/github.com/docker/docker/client/network_create_test.go b/vendor/github.com/docker/docker/client/network_create_test.go deleted file mode 100644 index eea5109e..00000000 --- a/vendor/github.com/docker/docker/client/network_create_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -func TestNetworkCreateError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.NetworkCreate(context.Background(), "mynetwork", types.NetworkCreate{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestNetworkCreate(t *testing.T) { - expectedURL := "/networks/create" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - - content, err := json.Marshal(types.NetworkCreateResponse{ - ID: "network_id", - Warning: "warning", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - networkResponse, err := client.NetworkCreate(context.Background(), "mynetwork", types.NetworkCreate{ - CheckDuplicate: true, - Driver: "mydriver", - EnableIPv6: true, - Internal: true, - Options: map[string]string{ - "opt-key": "opt-value", - }, - }) - if err != nil { - t.Fatal(err) - } - if networkResponse.ID != "network_id" { - t.Fatalf("expected networkResponse.ID to be 'network_id', got %s", networkResponse.ID) - } - if networkResponse.Warning != "warning" { - t.Fatalf("expected networkResponse.Warning to be 'warning', got %s", networkResponse.Warning) - } -} diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go index 5fc75a94..dd156766 100644 --- a/vendor/github.com/docker/docker/client/network_disconnect.go +++ b/vendor/github.com/docker/docker/client/network_disconnect.go @@ -1,8 +1,9 @@ package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types" - 
"golang.org/x/net/context" ) // NetworkDisconnect disconnects a container from an existent network in the docker host. diff --git a/vendor/github.com/docker/docker/client/network_disconnect_test.go b/vendor/github.com/docker/docker/client/network_disconnect_test.go deleted file mode 100644 index e092ccf2..00000000 --- a/vendor/github.com/docker/docker/client/network_disconnect_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -func TestNetworkDisconnectError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.NetworkDisconnect(context.Background(), "network_id", "container_id", false) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestNetworkDisconnect(t *testing.T) { - expectedURL := "/networks/network_id/disconnect" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - - var disconnect types.NetworkDisconnect - if err := json.NewDecoder(req.Body).Decode(&disconnect); err != nil { - return nil, err - } - - if disconnect.Container != "container_id" { - return nil, fmt.Errorf("expected 'container_id', got %s", disconnect.Container) - } - - if !disconnect.Force { - return nil, fmt.Errorf("expected Force to be true, got %v", disconnect.Force) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.NetworkDisconnect(context.Background(), "network_id", "container_id", true) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go index 0e18caf1..025f6d87 100644 --- a/vendor/github.com/docker/docker/client/network_inspect.go +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -2,12 +2,12 @@ package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // NetworkInspect returns the information for a specific network configured in the docker host. 
diff --git a/vendor/github.com/docker/docker/client/network_inspect_test.go b/vendor/github.com/docker/docker/client/network_inspect_test.go deleted file mode 100644 index 8778021e..00000000 --- a/vendor/github.com/docker/docker/client/network_inspect_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/network" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -func TestNetworkInspectError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.NetworkInspect(context.Background(), "nothing", types.NetworkInspectOptions{}) - assert.Check(t, is.Error(err, "Error response from daemon: Server error")) -} - -func TestNetworkInspectNotFoundError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "missing")), - } - - _, err := client.NetworkInspect(context.Background(), "unknown", types.NetworkInspectOptions{}) - assert.Check(t, is.Error(err, "Error: No such network: unknown")) - assert.Check(t, IsErrNotFound(err)) -} - -func TestNetworkInspectWithEmptyID(t *testing.T) { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - return nil, errors.New("should not make request") - }), - } - _, _, err := client.NetworkInspectWithRaw(context.Background(), "", types.NetworkInspectOptions{}) - if !IsErrNotFound(err) { - t.Fatalf("Expected NotFoundError, got %v", err) - } -} - -func TestNetworkInspect(t *testing.T) { - expectedURL := "/networks/network_id" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "GET" { - return nil, fmt.Errorf("expected GET method, got %s", req.Method) - } - - var ( - content []byte - err error - ) - if strings.Contains(req.URL.RawQuery, "scope=global") { - return &http.Response{ - StatusCode: http.StatusNotFound, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - } - - if strings.Contains(req.URL.RawQuery, "verbose=true") { - s := map[string]network.ServiceInfo{ - "web": {}, - } - content, err = json.Marshal(types.NetworkResource{ - Name: "mynetwork", - Services: s, - }) - } else { - content, err = json.Marshal(types.NetworkResource{ - Name: "mynetwork", - }) - } - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - r, err := client.NetworkInspect(context.Background(), "network_id", types.NetworkInspectOptions{}) - if err != nil { - t.Fatal(err) - } - if r.Name != "mynetwork" { - t.Fatalf("expected `mynetwork`, got %s", r.Name) - } - - r, err = client.NetworkInspect(context.Background(), "network_id", types.NetworkInspectOptions{Verbose: true}) - if err != nil { - t.Fatal(err) - } - if r.Name != "mynetwork" { - t.Fatalf("expected `mynetwork`, got %s", r.Name) - } - _, ok := r.Services["web"] - if !ok { - t.Fatalf("expected service `web` missing in the verbose output") - } - - _, err = client.NetworkInspect(context.Background(), "network_id", 
types.NetworkInspectOptions{Scope: "global"}) - assert.Check(t, is.Error(err, "Error: No such network: network_id")) -} diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go index 9b424285..f16b2f56 100644 --- a/vendor/github.com/docker/docker/client/network_list.go +++ b/vendor/github.com/docker/docker/client/network_list.go @@ -1,12 +1,12 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // NetworkList returns the list of networks configured in the docker host. diff --git a/vendor/github.com/docker/docker/client/network_list_test.go b/vendor/github.com/docker/docker/client/network_list_test.go deleted file mode 100644 index 2604894c..00000000 --- a/vendor/github.com/docker/docker/client/network_list_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" -) - -func TestNetworkListError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.NetworkList(context.Background(), types.NetworkListOptions{ - Filters: filters.NewArgs(), - }) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestNetworkList(t *testing.T) { - expectedURL := "/networks" - - noDanglingFilters := filters.NewArgs() - noDanglingFilters.Add("dangling", "false") - - danglingFilters := filters.NewArgs() - danglingFilters.Add("dangling", "true") - - labelFilters := filters.NewArgs() - labelFilters.Add("label", "label1") - labelFilters.Add("label", "label2") - - listCases := []struct { - options types.NetworkListOptions - expectedFilters string - }{ - { - options: types.NetworkListOptions{ - Filters: filters.NewArgs(), - }, - expectedFilters: "", - }, { - options: types.NetworkListOptions{ - Filters: noDanglingFilters, - }, - expectedFilters: `{"dangling":{"false":true}}`, - }, { - options: types.NetworkListOptions{ - Filters: danglingFilters, - }, - expectedFilters: `{"dangling":{"true":true}}`, - }, { - options: types.NetworkListOptions{ - Filters: labelFilters, - }, - expectedFilters: `{"label":{"label1":true,"label2":true}}`, - }, - } - - for _, listCase := range listCases { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "GET" { - return nil, fmt.Errorf("expected GET method, got %s", req.Method) - } - query := req.URL.Query() - actualFilters := query.Get("filters") - if actualFilters != listCase.expectedFilters { - return nil, fmt.Errorf("filters not set in URL query properly. 
Expected '%s', got %s", listCase.expectedFilters, actualFilters) - } - content, err := json.Marshal([]types.NetworkResource{ - { - Name: "network", - Driver: "bridge", - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - networkResources, err := client.NetworkList(context.Background(), listCase.options) - if err != nil { - t.Fatal(err) - } - if len(networkResources) != 1 { - t.Fatalf("expected 1 network resource, got %v", networkResources) - } - } -} diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go index d546f0c3..6418b8b6 100644 --- a/vendor/github.com/docker/docker/client/network_prune.go +++ b/vendor/github.com/docker/docker/client/network_prune.go @@ -1,12 +1,12 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // NetworksPrune requests the daemon to delete unused networks diff --git a/vendor/github.com/docker/docker/client/network_prune_test.go b/vendor/github.com/docker/docker/client/network_prune_test.go deleted file mode 100644 index 85908f0c..00000000 --- a/vendor/github.com/docker/docker/client/network_prune_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "golang.org/x/net/context" -) - -func TestNetworksPruneError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - version: "1.25", - } - - filters := filters.NewArgs() - - _, err := client.NetworksPrune(context.Background(), filters) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestNetworksPrune(t *testing.T) { - expectedURL := "/v1.25/networks/prune" - - danglingFilters := filters.NewArgs() - danglingFilters.Add("dangling", "true") - - noDanglingFilters := filters.NewArgs() - noDanglingFilters.Add("dangling", "false") - - labelFilters := filters.NewArgs() - labelFilters.Add("dangling", "true") - labelFilters.Add("label", "label1=foo") - labelFilters.Add("label", "label2!=bar") - - listCases := []struct { - filters filters.Args - expectedQueryParams map[string]string - }{ - { - filters: filters.Args{}, - expectedQueryParams: map[string]string{ - "until": "", - "filter": "", - "filters": "", - }, - }, - { - filters: danglingFilters, - expectedQueryParams: map[string]string{ - "until": "", - "filter": "", - "filters": `{"dangling":{"true":true}}`, - }, - }, - { - filters: noDanglingFilters, - expectedQueryParams: map[string]string{ - "until": "", - "filter": "", - "filters": `{"dangling":{"false":true}}`, - }, - }, - { - filters: labelFilters, - expectedQueryParams: map[string]string{ - "until": "", - "filter": "", - "filters": `{"dangling":{"true":true},"label":{"label1=foo":true,"label2!=bar":true}}`, - }, - }, - } - for _, listCase := range listCases { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if 
!strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - for key, expected := range listCase.expectedQueryParams { - actual := query.Get(key) - assert.Check(t, is.Equal(expected, actual)) - } - content, err := json.Marshal(types.NetworksPruneReport{ - NetworksDeleted: []string{"network_id1", "network_id2"}, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - version: "1.25", - } - - report, err := client.NetworksPrune(context.Background(), listCase.filters) - assert.Check(t, err) - assert.Check(t, is.Len(report.NetworksDeleted, 2)) - } -} diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go index c99de10c..12741437 100644 --- a/vendor/github.com/docker/docker/client/network_remove.go +++ b/vendor/github.com/docker/docker/client/network_remove.go @@ -1,6 +1,6 @@ package client // import "github.com/docker/docker/client" -import "golang.org/x/net/context" +import "context" // NetworkRemove removes an existent network from the docker host. func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { diff --git a/vendor/github.com/docker/docker/client/network_remove_test.go b/vendor/github.com/docker/docker/client/network_remove_test.go deleted file mode 100644 index c8c91015..00000000 --- a/vendor/github.com/docker/docker/client/network_remove_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestNetworkRemoveError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.NetworkRemove(context.Background(), "network_id") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestNetworkRemove(t *testing.T) { - expectedURL := "/networks/network_id" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "DELETE" { - return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), - }, nil - }), - } - - err := client.NetworkRemove(context.Background(), "network_id") - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go index 7c0c1d47..593b2e9f 100644 --- a/vendor/github.com/docker/docker/client/node_inspect.go +++ b/vendor/github.com/docker/docker/client/node_inspect.go @@ -2,11 +2,11 @@ package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // NodeInspectWithRaw returns the node information. 
diff --git a/vendor/github.com/docker/docker/client/node_inspect_test.go b/vendor/github.com/docker/docker/client/node_inspect_test.go deleted file mode 100644 index efda2877..00000000 --- a/vendor/github.com/docker/docker/client/node_inspect_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types/swarm" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -func TestNodeInspectError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, _, err := client.NodeInspectWithRaw(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestNodeInspectNodeNotFound(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "Server error")), - } - - _, _, err := client.NodeInspectWithRaw(context.Background(), "unknown") - if err == nil || !IsErrNotFound(err) { - t.Fatalf("expected a nodeNotFoundError error, got %v", err) - } -} - -func TestNodeInspectWithEmptyID(t *testing.T) { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - return nil, errors.New("should not make request") - }), - } - _, _, err := client.NodeInspectWithRaw(context.Background(), "") - if !IsErrNotFound(err) { - t.Fatalf("Expected NotFoundError, got %v", err) - } -} - -func TestNodeInspect(t *testing.T) { - expectedURL := "/nodes/node_id" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - content, err := json.Marshal(swarm.Node{ - ID: "node_id", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - nodeInspect, _, err := client.NodeInspectWithRaw(context.Background(), "node_id") - if err != nil { - t.Fatal(err) - } - if nodeInspect.ID != "node_id" { - t.Fatalf("expected `node_id`, got %s", nodeInspect.ID) - } -} diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go index c613ad21..9883f6fc 100644 --- a/vendor/github.com/docker/docker/client/node_list.go +++ b/vendor/github.com/docker/docker/client/node_list.go @@ -1,13 +1,13 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // NodeList returns the list of nodes. 
diff --git a/vendor/github.com/docker/docker/client/node_list_test.go b/vendor/github.com/docker/docker/client/node_list_test.go deleted file mode 100644 index d227d98f..00000000 --- a/vendor/github.com/docker/docker/client/node_list_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -func TestNodeListError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.NodeList(context.Background(), types.NodeListOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestNodeList(t *testing.T) { - expectedURL := "/nodes" - - filters := filters.NewArgs() - filters.Add("label", "label1") - filters.Add("label", "label2") - - listCases := []struct { - options types.NodeListOptions - expectedQueryParams map[string]string - }{ - { - options: types.NodeListOptions{}, - expectedQueryParams: map[string]string{ - "filters": "", - }, - }, - { - options: types.NodeListOptions{ - Filters: filters, - }, - expectedQueryParams: map[string]string{ - "filters": `{"label":{"label1":true,"label2":true}}`, - }, - }, - } - for _, listCase := range listCases { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - for key, expected := range listCase.expectedQueryParams { - actual := query.Get(key) - if actual != expected { - return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) - } - } - content, err := json.Marshal([]swarm.Node{ - { - ID: "node_id1", - }, - { - ID: "node_id2", - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - nodes, err := client.NodeList(context.Background(), listCase.options) - if err != nil { - t.Fatal(err) - } - if len(nodes) != 2 { - t.Fatalf("expected 2 nodes, got %v", nodes) - } - } -} diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go index 7c1a757d..e7a75057 100644 --- a/vendor/github.com/docker/docker/client/node_remove.go +++ b/vendor/github.com/docker/docker/client/node_remove.go @@ -1,11 +1,10 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - - "golang.org/x/net/context" ) // NodeRemove removes a Node. 
diff --git a/vendor/github.com/docker/docker/client/node_remove_test.go b/vendor/github.com/docker/docker/client/node_remove_test.go deleted file mode 100644 index f2921d88..00000000 --- a/vendor/github.com/docker/docker/client/node_remove_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - - "golang.org/x/net/context" -) - -func TestNodeRemoveError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.NodeRemove(context.Background(), "node_id", types.NodeRemoveOptions{Force: false}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestNodeRemove(t *testing.T) { - expectedURL := "/nodes/node_id" - - removeCases := []struct { - force bool - expectedForce string - }{ - { - expectedForce: "", - }, - { - force: true, - expectedForce: "1", - }, - } - - for _, removeCase := range removeCases { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "DELETE" { - return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) - } - force := req.URL.Query().Get("force") - if force != removeCase.expectedForce { - return nil, fmt.Errorf("force not set in URL query properly. expected '%s', got %s", removeCase.expectedForce, force) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), - }, nil - }), - } - - err := client.NodeRemove(context.Background(), "node_id", types.NodeRemoveOptions{Force: removeCase.force}) - if err != nil { - t.Fatal(err) - } - } -} diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go index 0b528a47..de32a617 100644 --- a/vendor/github.com/docker/docker/client/node_update.go +++ b/vendor/github.com/docker/docker/client/node_update.go @@ -1,11 +1,11 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "strconv" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // NodeUpdate updates a Node. 
diff --git a/vendor/github.com/docker/docker/client/node_update_test.go b/vendor/github.com/docker/docker/client/node_update_test.go deleted file mode 100644 index f4db3ae0..00000000 --- a/vendor/github.com/docker/docker/client/node_update_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/swarm" -) - -func TestNodeUpdateError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.NodeUpdate(context.Background(), "node_id", swarm.Version{}, swarm.NodeSpec{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestNodeUpdate(t *testing.T) { - expectedURL := "/nodes/node_id/update" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), - }, nil - }), - } - - err := client.NodeUpdate(context.Background(), "node_id", swarm.Version{}, swarm.NodeSpec{}) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go index e2366276..dec1423e 100644 --- a/vendor/github.com/docker/docker/client/ping.go +++ b/vendor/github.com/docker/docker/client/ping.go @@ -1,13 +1,13 @@ package client // import "github.com/docker/docker/client" import ( + "context" "path" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) -// Ping pings the server and returns the value of the "Docker-Experimental", "OS-Type" & "API-Version" headers +// Ping pings the server and returns the value of the "Docker-Experimental", "Builder-Version", "OS-Type" & "API-Version" headers func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { var ping types.Ping req, err := cli.buildRequest("GET", path.Join(cli.basePath, "/_ping"), nil, nil) @@ -27,6 +27,9 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { ping.Experimental = true } ping.OSType = serverResp.header.Get("OSType") + if bv := serverResp.header.Get("Builder-Version"); bv != "" { + ping.BuilderVersion = types.BuilderVersion(bv) + } } return ping, cli.checkResponseErr(serverResp) } diff --git a/vendor/github.com/docker/docker/client/ping_test.go b/vendor/github.com/docker/docker/client/ping_test.go deleted file mode 100644 index f83233ac..00000000 --- a/vendor/github.com/docker/docker/client/ping_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "errors" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "golang.org/x/net/context" -) - -// TestPingFail tests that when a server sends a non-successful response that we -// can still grab API details, when set. -// Some of this is just excercising the code paths to make sure there are no -// panics. 
-func TestPingFail(t *testing.T) { - var withHeader bool - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - resp := &http.Response{StatusCode: http.StatusInternalServerError} - if withHeader { - resp.Header = http.Header{} - resp.Header.Set("API-Version", "awesome") - resp.Header.Set("Docker-Experimental", "true") - } - resp.Body = ioutil.NopCloser(strings.NewReader("some error with the server")) - return resp, nil - }), - } - - ping, err := client.Ping(context.Background()) - assert.Check(t, is.ErrorContains(err, "")) - assert.Check(t, is.Equal(false, ping.Experimental)) - assert.Check(t, is.Equal("", ping.APIVersion)) - - withHeader = true - ping2, err := client.Ping(context.Background()) - assert.Check(t, is.ErrorContains(err, "")) - assert.Check(t, is.Equal(true, ping2.Experimental)) - assert.Check(t, is.Equal("awesome", ping2.APIVersion)) -} - -// TestPingWithError tests the case where there is a protocol error in the ping. -// This test is mostly just testing that there are no panics in this code path. -func TestPingWithError(t *testing.T) { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - resp := &http.Response{StatusCode: http.StatusInternalServerError} - resp.Header = http.Header{} - resp.Header.Set("API-Version", "awesome") - resp.Header.Set("Docker-Experimental", "true") - resp.Body = ioutil.NopCloser(strings.NewReader("some error with the server")) - return resp, errors.New("some error") - }), - } - - ping, err := client.Ping(context.Background()) - assert.Check(t, is.ErrorContains(err, "")) - assert.Check(t, is.Equal(false, ping.Experimental)) - assert.Check(t, is.Equal("", ping.APIVersion)) -} - -// TestPingSuccess tests that we are able to get the expected API headers/ping -// details on success. 
-func TestPingSuccess(t *testing.T) { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - resp := &http.Response{StatusCode: http.StatusInternalServerError} - resp.Header = http.Header{} - resp.Header.Set("API-Version", "awesome") - resp.Header.Set("Docker-Experimental", "true") - resp.Body = ioutil.NopCloser(strings.NewReader("some error with the server")) - return resp, nil - }), - } - ping, err := client.Ping(context.Background()) - assert.Check(t, is.ErrorContains(err, "")) - assert.Check(t, is.Equal(true, ping.Experimental)) - assert.Check(t, is.Equal("awesome", ping.APIVersion)) -} diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go index 51ef9c99..4591db50 100644 --- a/vendor/github.com/docker/docker/client/plugin_create.go +++ b/vendor/github.com/docker/docker/client/plugin_create.go @@ -1,12 +1,12 @@ package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/http" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // PluginCreate creates a plugin diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go index 8d8c41d2..01f6574f 100644 --- a/vendor/github.com/docker/docker/client/plugin_disable.go +++ b/vendor/github.com/docker/docker/client/plugin_disable.go @@ -1,10 +1,10 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // PluginDisable disables a plugin diff --git a/vendor/github.com/docker/docker/client/plugin_disable_test.go b/vendor/github.com/docker/docker/client/plugin_disable_test.go deleted file mode 100644 index 104c6f88..00000000 --- a/vendor/github.com/docker/docker/client/plugin_disable_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -func TestPluginDisableError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.PluginDisable(context.Background(), "plugin_name", types.PluginDisableOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestPluginDisable(t *testing.T) { - expectedURL := "/plugins/plugin_name/disable" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.PluginDisable(context.Background(), "plugin_name", types.PluginDisableOptions{}) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go index 1f0f8f73..736da48b 100644 --- a/vendor/github.com/docker/docker/client/plugin_enable.go +++ b/vendor/github.com/docker/docker/client/plugin_enable.go @@ -1,11 +1,11 @@ package client // 
import "github.com/docker/docker/client" import ( + "context" "net/url" "strconv" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // PluginEnable enables a plugin diff --git a/vendor/github.com/docker/docker/client/plugin_enable_test.go b/vendor/github.com/docker/docker/client/plugin_enable_test.go deleted file mode 100644 index 64bb2387..00000000 --- a/vendor/github.com/docker/docker/client/plugin_enable_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -func TestPluginEnableError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.PluginEnable(context.Background(), "plugin_name", types.PluginEnableOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestPluginEnable(t *testing.T) { - expectedURL := "/plugins/plugin_name/enable" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.PluginEnable(context.Background(), "plugin_name", types.PluginEnableOptions{}) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go index 983b1090..0ab7beae 100644 --- a/vendor/github.com/docker/docker/client/plugin_inspect.go +++ b/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -2,11 +2,11 @@ package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // PluginInspectWithRaw inspects an existing plugin diff --git a/vendor/github.com/docker/docker/client/plugin_inspect_test.go b/vendor/github.com/docker/docker/client/plugin_inspect_test.go deleted file mode 100644 index eada16db..00000000 --- a/vendor/github.com/docker/docker/client/plugin_inspect_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -func TestPluginInspectError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, _, err := client.PluginInspectWithRaw(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestPluginInspectWithEmptyID(t *testing.T) { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - return nil, errors.New("should not make request") - }), - } - _, _, err := client.PluginInspectWithRaw(context.Background(), "") - if !IsErrNotFound(err) { - t.Fatalf("Expected 
NotFoundError, got %v", err) - } -} - -func TestPluginInspect(t *testing.T) { - expectedURL := "/plugins/plugin_name" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - content, err := json.Marshal(types.Plugin{ - ID: "plugin_id", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - pluginInspect, _, err := client.PluginInspectWithRaw(context.Background(), "plugin_name") - if err != nil { - t.Fatal(err) - } - if pluginInspect.ID != "plugin_id" { - t.Fatalf("expected `plugin_id`, got %s", pluginInspect.ID) - } -} diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go index ae04387d..13baa40a 100644 --- a/vendor/github.com/docker/docker/client/plugin_install.go +++ b/vendor/github.com/docker/docker/client/plugin_install.go @@ -1,6 +1,7 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "io" "net/http" @@ -9,7 +10,6 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/pkg/errors" - "golang.org/x/net/context" ) // PluginInstall installs a plugin diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go index e4987662..ade1051a 100644 --- a/vendor/github.com/docker/docker/client/plugin_list.go +++ b/vendor/github.com/docker/docker/client/plugin_list.go @@ -1,12 +1,12 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // PluginList returns the installed plugins diff --git a/vendor/github.com/docker/docker/client/plugin_list_test.go b/vendor/github.com/docker/docker/client/plugin_list_test.go deleted file mode 100644 index bad5dd13..00000000 --- a/vendor/github.com/docker/docker/client/plugin_list_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" -) - -func TestPluginListError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.PluginList(context.Background(), filters.NewArgs()) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestPluginList(t *testing.T) { - expectedURL := "/plugins" - - enabledFilters := filters.NewArgs() - enabledFilters.Add("enabled", "true") - - capabilityFilters := filters.NewArgs() - capabilityFilters.Add("capability", "volumedriver") - capabilityFilters.Add("capability", "authz") - - listCases := []struct { - filters filters.Args - expectedQueryParams map[string]string - }{ - { - filters: filters.NewArgs(), - expectedQueryParams: map[string]string{ - "all": "", - "filter": "", - "filters": "", - }, - }, - { - filters: enabledFilters, - expectedQueryParams: map[string]string{ - "all": "", - "filter": "", - "filters": 
`{"enabled":{"true":true}}`, - }, - }, - { - filters: capabilityFilters, - expectedQueryParams: map[string]string{ - "all": "", - "filter": "", - "filters": `{"capability":{"authz":true,"volumedriver":true}}`, - }, - }, - } - - for _, listCase := range listCases { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - for key, expected := range listCase.expectedQueryParams { - actual := query.Get(key) - if actual != expected { - return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) - } - } - content, err := json.Marshal([]*types.Plugin{ - { - ID: "plugin_id1", - }, - { - ID: "plugin_id2", - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - plugins, err := client.PluginList(context.Background(), listCase.filters) - if err != nil { - t.Fatal(err) - } - if len(plugins) != 2 { - t.Fatalf("expected 2 plugins, got %v", plugins) - } - } -} diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go index 33b38d84..d20bfe84 100644 --- a/vendor/github.com/docker/docker/client/plugin_push.go +++ b/vendor/github.com/docker/docker/client/plugin_push.go @@ -1,9 +1,8 @@ package client // import "github.com/docker/docker/client" import ( + "context" "io" - - "golang.org/x/net/context" ) // PluginPush pushes a plugin to a registry diff --git a/vendor/github.com/docker/docker/client/plugin_push_test.go b/vendor/github.com/docker/docker/client/plugin_push_test.go deleted file mode 100644 index 8c5a4508..00000000 --- a/vendor/github.com/docker/docker/client/plugin_push_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestPluginPushError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.PluginPush(context.Background(), "plugin_name", "") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestPluginPush(t *testing.T) { - expectedURL := "/plugins/plugin_name" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - auth := req.Header.Get("X-Registry-Auth") - if auth != "authtoken" { - return nil, fmt.Errorf("Invalid auth header : expected 'authtoken', got %s", auth) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - _, err := client.PluginPush(context.Background(), "plugin_name", "authtoken") - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go index b9e7d8e4..8563bab0 100644 --- a/vendor/github.com/docker/docker/client/plugin_remove.go +++ 
b/vendor/github.com/docker/docker/client/plugin_remove.go @@ -1,10 +1,10 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // PluginRemove removes a plugin diff --git a/vendor/github.com/docker/docker/client/plugin_remove_test.go b/vendor/github.com/docker/docker/client/plugin_remove_test.go deleted file mode 100644 index adcc3c12..00000000 --- a/vendor/github.com/docker/docker/client/plugin_remove_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - - "golang.org/x/net/context" -) - -func TestPluginRemoveError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.PluginRemove(context.Background(), "plugin_name", types.PluginRemoveOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestPluginRemove(t *testing.T) { - expectedURL := "/plugins/plugin_name" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "DELETE" { - return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.PluginRemove(context.Background(), "plugin_name", types.PluginRemoveOptions{}) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go index de72cc0d..dcf5752c 100644 --- a/vendor/github.com/docker/docker/client/plugin_set.go +++ b/vendor/github.com/docker/docker/client/plugin_set.go @@ -1,7 +1,7 @@ package client // import "github.com/docker/docker/client" import ( - "golang.org/x/net/context" + "context" ) // PluginSet modifies settings for an existing plugin diff --git a/vendor/github.com/docker/docker/client/plugin_set_test.go b/vendor/github.com/docker/docker/client/plugin_set_test.go deleted file mode 100644 index de2cc431..00000000 --- a/vendor/github.com/docker/docker/client/plugin_set_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestPluginSetError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.PluginSet(context.Background(), "plugin_name", []string{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestPluginSet(t *testing.T) { - expectedURL := "/plugins/plugin_name/set" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - return &http.Response{ - 
StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.PluginSet(context.Background(), "plugin_name", []string{"arg1"}) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go index 33b740ca..115cea94 100644 --- a/vendor/github.com/docker/docker/client/plugin_upgrade.go +++ b/vendor/github.com/docker/docker/client/plugin_upgrade.go @@ -1,13 +1,13 @@ package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/pkg/errors" - "golang.org/x/net/context" ) // PluginUpgrade upgrades a plugin diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index 302f599f..a19d62aa 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -2,6 +2,7 @@ package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io" @@ -15,7 +16,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/versions" "github.com/pkg/errors" - "golang.org/x/net/context" "golang.org/x/net/context/ctxhttp" ) diff --git a/vendor/github.com/docker/docker/client/request_test.go b/vendor/github.com/docker/docker/client/request_test.go deleted file mode 100644 index 1a0a87e2..00000000 --- a/vendor/github.com/docker/docker/client/request_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/gotestyourself/gotestyourself/assert" - "golang.org/x/net/context" -) - -// TestSetHostHeader should set fake host for local communications, set real host -// for normal communications. 
-func TestSetHostHeader(t *testing.T) { - testURL := "/test" - testCases := []struct { - host string - expectedHost string - expectedURLHost string - }{ - { - "unix:///var/run/docker.sock", - "docker", - "/var/run/docker.sock", - }, - { - "npipe:////./pipe/docker_engine", - "docker", - "//./pipe/docker_engine", - }, - { - "tcp://0.0.0.0:4243", - "", - "0.0.0.0:4243", - }, - { - "tcp://localhost:4243", - "", - "localhost:4243", - }, - } - - for c, test := range testCases { - hostURL, err := ParseHostURL(test.host) - assert.NilError(t, err) - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, testURL) { - return nil, fmt.Errorf("Test Case #%d: Expected URL %q, got %q", c, testURL, req.URL) - } - if req.Host != test.expectedHost { - return nil, fmt.Errorf("Test Case #%d: Expected host %q, got %q", c, test.expectedHost, req.Host) - } - if req.URL.Host != test.expectedURLHost { - return nil, fmt.Errorf("Test Case #%d: Expected URL host %q, got %q", c, test.expectedURLHost, req.URL.Host) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(([]byte("")))), - }, nil - }), - - proto: hostURL.Scheme, - addr: hostURL.Host, - basePath: hostURL.Path, - } - - _, err = client.sendRequest(context.Background(), "GET", testURL, nil, nil, nil) - assert.NilError(t, err) - } -} - -// TestPlainTextError tests the server returning an error in plain text for -// backwards compatibility with API versions <1.24. All other tests use -// errors returned as JSON -func TestPlainTextError(t *testing.T) { - client := &Client{ - client: newMockClient(plainTextErrorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerList(context.Background(), types.ContainerListOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go index fa639caf..09fae82f 100644 --- a/vendor/github.com/docker/docker/client/secret_create.go +++ b/vendor/github.com/docker/docker/client/secret_create.go @@ -1,11 +1,11 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // SecretCreate creates a new Secret. 
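secret_create.go above makes the same import swap. A hedged sketch of the call path the removed test below exercises; the Name and Data fields on swarm.SecretSpec are assumptions not shown in this diff, the values are illustrative, and per those tests the endpoint requires API version 1.25 or newer:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	spec := swarm.SecretSpec{}
	spec.Name = "example_secret" // illustrative name (field assumed on the spec)
	spec.Data = []byte("s3cr3t") // illustrative payload (field assumed on the spec)

	// Fails with a version error against daemons older than API 1.25.
	resp, err := cli.SecretCreate(context.Background(), spec)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created secret", resp.ID)
}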
diff --git a/vendor/github.com/docker/docker/client/secret_create_test.go b/vendor/github.com/docker/docker/client/secret_create_test.go deleted file mode 100644 index b31cab50..00000000 --- a/vendor/github.com/docker/docker/client/secret_create_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "golang.org/x/net/context" -) - -func TestSecretCreateUnsupported(t *testing.T) { - client := &Client{ - version: "1.24", - client: &http.Client{}, - } - _, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) - assert.Check(t, is.Error(err, `"secret create" requires API version 1.25, but the Docker daemon API version is 1.24`)) -} - -func TestSecretCreateError(t *testing.T) { - client := &Client{ - version: "1.25", - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestSecretCreate(t *testing.T) { - expectedURL := "/v1.25/secrets/create" - client := &Client{ - version: "1.25", - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - b, err := json.Marshal(types.SecretCreateResponse{ - ID: "test_secret", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusCreated, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - r, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) - if err != nil { - t.Fatal(err) - } - if r.ID != "test_secret" { - t.Fatalf("expected `test_secret`, got %s", r.ID) - } -} diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go index 9342e3bc..e8322f45 100644 --- a/vendor/github.com/docker/docker/client/secret_inspect.go +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -2,11 +2,11 @@ package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // SecretInspectWithRaw returns the secret information with raw data diff --git a/vendor/github.com/docker/docker/client/secret_inspect_test.go b/vendor/github.com/docker/docker/client/secret_inspect_test.go deleted file mode 100644 index 0bb3ae24..00000000 --- a/vendor/github.com/docker/docker/client/secret_inspect_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types/swarm" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -func TestSecretInspectUnsupported(t *testing.T) { - client := &Client{ 
- version: "1.24", - client: &http.Client{}, - } - _, _, err := client.SecretInspectWithRaw(context.Background(), "nothing") - assert.Check(t, is.Error(err, `"secret inspect" requires API version 1.25, but the Docker daemon API version is 1.24`)) -} - -func TestSecretInspectError(t *testing.T) { - client := &Client{ - version: "1.25", - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, _, err := client.SecretInspectWithRaw(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestSecretInspectSecretNotFound(t *testing.T) { - client := &Client{ - version: "1.25", - client: newMockClient(errorMock(http.StatusNotFound, "Server error")), - } - - _, _, err := client.SecretInspectWithRaw(context.Background(), "unknown") - if err == nil || !IsErrNotFound(err) { - t.Fatalf("expected a secretNotFoundError error, got %v", err) - } -} - -func TestSecretInspectWithEmptyID(t *testing.T) { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - return nil, errors.New("should not make request") - }), - } - _, _, err := client.SecretInspectWithRaw(context.Background(), "") - if !IsErrNotFound(err) { - t.Fatalf("Expected NotFoundError, got %v", err) - } -} - -func TestSecretInspect(t *testing.T) { - expectedURL := "/v1.25/secrets/secret_id" - client := &Client{ - version: "1.25", - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - content, err := json.Marshal(swarm.Secret{ - ID: "secret_id", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - secretInspect, _, err := client.SecretInspectWithRaw(context.Background(), "secret_id") - if err != nil { - t.Fatal(err) - } - if secretInspect.ID != "secret_id" { - t.Fatalf("expected `secret_id`, got %s", secretInspect.ID) - } -} diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go index adf4d92b..f6bf7ba4 100644 --- a/vendor/github.com/docker/docker/client/secret_list.go +++ b/vendor/github.com/docker/docker/client/secret_list.go @@ -1,13 +1,13 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // SecretList returns the list of secrets. 
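secret_list.go follows the same pattern, and the deleted test below shows how filter arguments are marshalled into the "filters" query parameter (for example {"label":{"label1":true,"label2":true}}). A short caller-side sketch of the equivalent code, with the same illustrative labels; like the other secret calls it needs API version 1.25 or newer:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	// Filter arguments are serialized to JSON in the "filters" query parameter.
	f := filters.NewArgs()
	f.Add("label", "label1")
	f.Add("label", "label2")

	secrets, err := cli.SecretList(context.Background(), types.SecretListOptions{Filters: f})
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range secrets {
		fmt.Println(s.ID)
	}
}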
diff --git a/vendor/github.com/docker/docker/client/secret_list_test.go b/vendor/github.com/docker/docker/client/secret_list_test.go deleted file mode 100644 index 36d8e8e2..00000000 --- a/vendor/github.com/docker/docker/client/secret_list_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "golang.org/x/net/context" -) - -func TestSecretListUnsupported(t *testing.T) { - client := &Client{ - version: "1.24", - client: &http.Client{}, - } - _, err := client.SecretList(context.Background(), types.SecretListOptions{}) - assert.Check(t, is.Error(err, `"secret list" requires API version 1.25, but the Docker daemon API version is 1.24`)) -} - -func TestSecretListError(t *testing.T) { - client := &Client{ - version: "1.25", - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.SecretList(context.Background(), types.SecretListOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestSecretList(t *testing.T) { - expectedURL := "/v1.25/secrets" - - filters := filters.NewArgs() - filters.Add("label", "label1") - filters.Add("label", "label2") - - listCases := []struct { - options types.SecretListOptions - expectedQueryParams map[string]string - }{ - { - options: types.SecretListOptions{}, - expectedQueryParams: map[string]string{ - "filters": "", - }, - }, - { - options: types.SecretListOptions{ - Filters: filters, - }, - expectedQueryParams: map[string]string{ - "filters": `{"label":{"label1":true,"label2":true}}`, - }, - }, - } - for _, listCase := range listCases { - client := &Client{ - version: "1.25", - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - for key, expected := range listCase.expectedQueryParams { - actual := query.Get(key) - if actual != expected { - return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) - } - } - content, err := json.Marshal([]swarm.Secret{ - { - ID: "secret_id1", - }, - { - ID: "secret_id2", - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - secrets, err := client.SecretList(context.Background(), listCase.options) - if err != nil { - t.Fatal(err) - } - if len(secrets) != 2 { - t.Fatalf("expected 2 secrets, got %v", secrets) - } - } -} diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go index 89326e09..e9d52182 100644 --- a/vendor/github.com/docker/docker/client/secret_remove.go +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -1,6 +1,6 @@ package client // import "github.com/docker/docker/client" -import "golang.org/x/net/context" +import "context" // SecretRemove removes a Secret. 
func (cli *Client) SecretRemove(ctx context.Context, id string) error { diff --git a/vendor/github.com/docker/docker/client/secret_remove_test.go b/vendor/github.com/docker/docker/client/secret_remove_test.go deleted file mode 100644 index 37c22650..00000000 --- a/vendor/github.com/docker/docker/client/secret_remove_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "golang.org/x/net/context" -) - -func TestSecretRemoveUnsupported(t *testing.T) { - client := &Client{ - version: "1.24", - client: &http.Client{}, - } - err := client.SecretRemove(context.Background(), "secret_id") - assert.Check(t, is.Error(err, `"secret remove" requires API version 1.25, but the Docker daemon API version is 1.24`)) -} - -func TestSecretRemoveError(t *testing.T) { - client := &Client{ - version: "1.25", - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.SecretRemove(context.Background(), "secret_id") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestSecretRemove(t *testing.T) { - expectedURL := "/v1.25/secrets/secret_id" - - client := &Client{ - version: "1.25", - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "DELETE" { - return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), - }, nil - }), - } - - err := client.SecretRemove(context.Background(), "secret_id") - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go index 8efd35c1..164256bb 100644 --- a/vendor/github.com/docker/docker/client/secret_update.go +++ b/vendor/github.com/docker/docker/client/secret_update.go @@ -1,11 +1,11 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "strconv" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // SecretUpdate attempts to update a Secret diff --git a/vendor/github.com/docker/docker/client/secret_update_test.go b/vendor/github.com/docker/docker/client/secret_update_test.go deleted file mode 100644 index 3ff172ba..00000000 --- a/vendor/github.com/docker/docker/client/secret_update_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types/swarm" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "golang.org/x/net/context" -) - -func TestSecretUpdateUnsupported(t *testing.T) { - client := &Client{ - version: "1.24", - client: &http.Client{}, - } - err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) - assert.Check(t, is.Error(err, `"secret update" requires API version 1.25, but the Docker daemon API version is 1.24`)) -} - -func TestSecretUpdateError(t *testing.T) { - client := 
&Client{ - version: "1.25", - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestSecretUpdate(t *testing.T) { - expectedURL := "/v1.25/secrets/secret_id/update" - - client := &Client{ - version: "1.25", - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), - }, nil - }), - } - - err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go index cf85ca4a..8fadda4a 100644 --- a/vendor/github.com/docker/docker/client/service_create.go +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -1,6 +1,7 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "strings" @@ -8,9 +9,8 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "golang.org/x/net/context" ) // ServiceCreate creates a new Service. @@ -136,7 +136,7 @@ func imageWithDigestString(image string, dgst digest.Digest) string { // imageWithTagString takes an image string, and returns a tagged image // string, adding a 'latest' tag if one was not provided. 
It returns an -// emptry string if a canonical reference was provided +// empty string if a canonical reference was provided func imageWithTagString(image string) string { namedRef, err := reference.ParseNormalizedNamed(image) if err == nil { diff --git a/vendor/github.com/docker/docker/client/service_create_test.go b/vendor/github.com/docker/docker/client/service_create_test.go deleted file mode 100644 index c5d8ae4f..00000000 --- a/vendor/github.com/docker/docker/client/service_create_test.go +++ /dev/null @@ -1,211 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/net/context" -) - -func TestServiceCreateError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{}, types.ServiceCreateOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestServiceCreate(t *testing.T) { - expectedURL := "/services/create" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - b, err := json.Marshal(types.ServiceCreateResponse{ - ID: "service_id", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - r, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{}, types.ServiceCreateOptions{}) - if err != nil { - t.Fatal(err) - } - if r.ID != "service_id" { - t.Fatalf("expected `service_id`, got %s", r.ID) - } -} - -func TestServiceCreateCompatiblePlatforms(t *testing.T) { - client := &Client{ - version: "1.30", - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if strings.HasPrefix(req.URL.Path, "/v1.30/services/create") { - var serviceSpec swarm.ServiceSpec - - // check if the /distribution endpoint returned correct output - err := json.NewDecoder(req.Body).Decode(&serviceSpec) - if err != nil { - return nil, err - } - - assert.Check(t, is.Equal("foobar:1.0@sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96", serviceSpec.TaskTemplate.ContainerSpec.Image)) - assert.Check(t, is.Len(serviceSpec.TaskTemplate.Placement.Platforms, 1)) - - p := serviceSpec.TaskTemplate.Placement.Platforms[0] - b, err := json.Marshal(types.ServiceCreateResponse{ - ID: "service_" + p.OS + "_" + p.Architecture, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - } else if strings.HasPrefix(req.URL.Path, "/v1.30/distribution/") { - b, err := json.Marshal(registrytypes.DistributionInspect{ - Descriptor: v1.Descriptor{ - Digest: 
"sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96", - }, - Platforms: []v1.Platform{ - { - Architecture: "amd64", - OS: "linux", - }, - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - } else { - return nil, fmt.Errorf("unexpected URL '%s'", req.URL.Path) - } - }), - } - - spec := swarm.ServiceSpec{TaskTemplate: swarm.TaskSpec{ContainerSpec: &swarm.ContainerSpec{Image: "foobar:1.0"}}} - - r, err := client.ServiceCreate(context.Background(), spec, types.ServiceCreateOptions{QueryRegistry: true}) - assert.Check(t, err) - assert.Check(t, is.Equal("service_linux_amd64", r.ID)) -} - -func TestServiceCreateDigestPinning(t *testing.T) { - dgst := "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" - dgstAlt := "sha256:37ffbf3f7497c07584dc9637ffbf3f7497c0758c0537ffbf3f7497c0c88e2bb7" - serviceCreateImage := "" - pinByDigestTests := []struct { - img string // input image provided by the user - expected string // expected image after digest pinning - }{ - // default registry returns familiar string - {"docker.io/library/alpine", "alpine:latest@" + dgst}, - // provided tag is preserved and digest added - {"alpine:edge", "alpine:edge@" + dgst}, - // image with provided alternative digest remains unchanged - {"alpine@" + dgstAlt, "alpine@" + dgstAlt}, - // image with provided tag and alternative digest remains unchanged - {"alpine:edge@" + dgstAlt, "alpine:edge@" + dgstAlt}, - // image on alternative registry does not result in familiar string - {"alternate.registry/library/alpine", "alternate.registry/library/alpine:latest@" + dgst}, - // unresolvable image does not get a digest - {"cannotresolve", "cannotresolve:latest"}, - } - - client := &Client{ - version: "1.30", - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if strings.HasPrefix(req.URL.Path, "/v1.30/services/create") { - // reset and set image received by the service create endpoint - serviceCreateImage = "" - var service swarm.ServiceSpec - if err := json.NewDecoder(req.Body).Decode(&service); err != nil { - return nil, fmt.Errorf("could not parse service create request") - } - serviceCreateImage = service.TaskTemplate.ContainerSpec.Image - - b, err := json.Marshal(types.ServiceCreateResponse{ - ID: "service_id", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - } else if strings.HasPrefix(req.URL.Path, "/v1.30/distribution/cannotresolve") { - // unresolvable image - return nil, fmt.Errorf("cannot resolve image") - } else if strings.HasPrefix(req.URL.Path, "/v1.30/distribution/") { - // resolvable images - b, err := json.Marshal(registrytypes.DistributionInspect{ - Descriptor: v1.Descriptor{ - Digest: digest.Digest(dgst), - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - } - return nil, fmt.Errorf("unexpected URL '%s'", req.URL.Path) - }), - } - - // run pin by digest tests - for _, p := range pinByDigestTests { - r, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{ - TaskTemplate: swarm.TaskSpec{ - ContainerSpec: &swarm.ContainerSpec{ - Image: p.img, - }, - }, - }, types.ServiceCreateOptions{QueryRegistry: true}) - - if err != nil { - t.Fatal(err) - } - - if r.ID != "service_id" { - t.Fatalf("expected `service_id`, got %s", r.ID) 
- } - - if p.expected != serviceCreateImage { - t.Fatalf("expected image %s, got %s", p.expected, serviceCreateImage) - } - } -} diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go index b4c29442..de6aa22d 100644 --- a/vendor/github.com/docker/docker/client/service_inspect.go +++ b/vendor/github.com/docker/docker/client/service_inspect.go @@ -2,6 +2,7 @@ package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -9,7 +10,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // ServiceInspectWithRaw returns the service information and the raw data. diff --git a/vendor/github.com/docker/docker/client/service_inspect_test.go b/vendor/github.com/docker/docker/client/service_inspect_test.go deleted file mode 100644 index be5165a1..00000000 --- a/vendor/github.com/docker/docker/client/service_inspect_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -func TestServiceInspectError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, _, err := client.ServiceInspectWithRaw(context.Background(), "nothing", types.ServiceInspectOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestServiceInspectServiceNotFound(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "Server error")), - } - - _, _, err := client.ServiceInspectWithRaw(context.Background(), "unknown", types.ServiceInspectOptions{}) - if err == nil || !IsErrNotFound(err) { - t.Fatalf("expected a serviceNotFoundError error, got %v", err) - } -} - -func TestServiceInspectWithEmptyID(t *testing.T) { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - return nil, errors.New("should not make request") - }), - } - _, _, err := client.ServiceInspectWithRaw(context.Background(), "", types.ServiceInspectOptions{}) - if !IsErrNotFound(err) { - t.Fatalf("Expected NotFoundError, got %v", err) - } -} - -func TestServiceInspect(t *testing.T) { - expectedURL := "/services/service_id" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - content, err := json.Marshal(swarm.Service{ - ID: "service_id", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - serviceInspect, _, err := client.ServiceInspectWithRaw(context.Background(), "service_id", types.ServiceInspectOptions{}) - if err != nil { - t.Fatal(err) - } - if serviceInspect.ID != "service_id" { - t.Fatalf("expected `service_id`, got %s", serviceInspect.ID) - } -} diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go index fdb33cc2..7d53e2b9 100644 --- 
a/vendor/github.com/docker/docker/client/service_list.go +++ b/vendor/github.com/docker/docker/client/service_list.go @@ -1,13 +1,13 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // ServiceList returns the list of services. diff --git a/vendor/github.com/docker/docker/client/service_list_test.go b/vendor/github.com/docker/docker/client/service_list_test.go deleted file mode 100644 index 482c7ddb..00000000 --- a/vendor/github.com/docker/docker/client/service_list_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -func TestServiceListError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.ServiceList(context.Background(), types.ServiceListOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestServiceList(t *testing.T) { - expectedURL := "/services" - - filters := filters.NewArgs() - filters.Add("label", "label1") - filters.Add("label", "label2") - - listCases := []struct { - options types.ServiceListOptions - expectedQueryParams map[string]string - }{ - { - options: types.ServiceListOptions{}, - expectedQueryParams: map[string]string{ - "filters": "", - }, - }, - { - options: types.ServiceListOptions{ - Filters: filters, - }, - expectedQueryParams: map[string]string{ - "filters": `{"label":{"label1":true,"label2":true}}`, - }, - }, - } - for _, listCase := range listCases { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - for key, expected := range listCase.expectedQueryParams { - actual := query.Get(key) - if actual != expected { - return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) - } - } - content, err := json.Marshal([]swarm.Service{ - { - ID: "service_id1", - }, - { - ID: "service_id2", - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - services, err := client.ServiceList(context.Background(), listCase.options) - if err != nil { - t.Fatal(err) - } - if len(services) != 2 { - t.Fatalf("expected 2 services, got %v", services) - } - } -} diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go index 4b393bb4..906fd405 100644 --- a/vendor/github.com/docker/docker/client/service_logs.go +++ b/vendor/github.com/docker/docker/client/service_logs.go @@ -1,14 +1,14 @@ package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" "time" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" ) // ServiceLogs returns the logs generated by a service in an io.ReadCloser. @@ -26,7 +26,7 @@ func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options ty if options.Since != "" { ts, err := timetypes.GetTimestamp(options.Since, time.Now()) if err != nil { - return nil, err + return nil, errors.Wrap(err, `invalid value for "since"`) } query.Set("since", ts) } diff --git a/vendor/github.com/docker/docker/client/service_logs_test.go b/vendor/github.com/docker/docker/client/service_logs_test.go deleted file mode 100644 index e57b9bd1..00000000 --- a/vendor/github.com/docker/docker/client/service_logs_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "os" - "strings" - "testing" - "time" - - "github.com/docker/docker/api/types" - - "golang.org/x/net/context" -) - -func TestServiceLogsError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ServiceLogs(context.Background(), "service_id", types.ContainerLogsOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } - _, err = client.ServiceLogs(context.Background(), "service_id", types.ContainerLogsOptions{ - Since: "2006-01-02TZ", - }) - if err == nil || !strings.Contains(err.Error(), `parsing time "2006-01-02TZ"`) { - t.Fatalf("expected a 'parsing time' error, got %v", err) - } -} - -func TestServiceLogs(t *testing.T) { - expectedURL := "/services/service_id/logs" - cases := []struct { - options types.ContainerLogsOptions - expectedQueryParams map[string]string - }{ - { - expectedQueryParams: map[string]string{ - "tail": "", - }, - }, - { - options: types.ContainerLogsOptions{ - Tail: "any", - }, - expectedQueryParams: map[string]string{ - "tail": "any", - }, - }, - { - options: types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - Timestamps: true, - Details: true, - Follow: true, - }, - expectedQueryParams: map[string]string{ - "tail": "", - "stdout": "1", - "stderr": "1", - "timestamps": "1", - "details": "1", - "follow": "1", - }, - }, - { - options: types.ContainerLogsOptions{ - // An complete invalid date, timestamp or go duration will be - // passed as is - Since: "invalid but valid", - }, - expectedQueryParams: map[string]string{ - 
"tail": "", - "since": "invalid but valid", - }, - }, - } - for _, logCase := range cases { - client := &Client{ - client: newMockClient(func(r *http.Request) (*http.Response, error) { - if !strings.HasPrefix(r.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) - } - // Check query parameters - query := r.URL.Query() - for key, expected := range logCase.expectedQueryParams { - actual := query.Get(key) - if actual != expected { - return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) - } - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), - }, nil - }), - } - body, err := client.ServiceLogs(context.Background(), "service_id", logCase.options) - if err != nil { - t.Fatal(err) - } - defer body.Close() - content, err := ioutil.ReadAll(body) - if err != nil { - t.Fatal(err) - } - if string(content) != "response" { - t.Fatalf("expected response to contain 'response', got %s", string(content)) - } - } -} - -func ExampleClient_ServiceLogs_withTimeout() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - client, _ := NewEnvClient() - reader, err := client.ServiceLogs(ctx, "service_id", types.ContainerLogsOptions{}) - if err != nil { - log.Fatal(err) - } - - _, err = io.Copy(os.Stdout, reader) - if err != nil && err != io.EOF { - log.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go index 7ef04e82..fe3421be 100644 --- a/vendor/github.com/docker/docker/client/service_remove.go +++ b/vendor/github.com/docker/docker/client/service_remove.go @@ -1,6 +1,6 @@ package client // import "github.com/docker/docker/client" -import "golang.org/x/net/context" +import "context" // ServiceRemove kills and removes a service. 
func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { diff --git a/vendor/github.com/docker/docker/client/service_remove_test.go b/vendor/github.com/docker/docker/client/service_remove_test.go deleted file mode 100644 index 9198763f..00000000 --- a/vendor/github.com/docker/docker/client/service_remove_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "golang.org/x/net/context" -) - -func TestServiceRemoveError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.ServiceRemove(context.Background(), "service_id") - assert.Check(t, is.Error(err, "Error response from daemon: Server error")) -} - -func TestServiceRemoveNotFoundError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "missing")), - } - - err := client.ServiceRemove(context.Background(), "service_id") - assert.Check(t, is.Error(err, "Error: No such service: service_id")) - assert.Check(t, IsErrNotFound(err)) -} - -func TestServiceRemove(t *testing.T) { - expectedURL := "/services/service_id" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "DELETE" { - return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), - }, nil - }), - } - - err := client.ServiceRemove(context.Background(), "service_id") - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go index 57cb45d8..5a7a61b0 100644 --- a/vendor/github.com/docker/docker/client/service_update.go +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -1,13 +1,13 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "strconv" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // ServiceUpdate updates a Service. 
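Editor's note: most hunks in this vendor bump are the same mechanical change, the client package now imports the standard library's context package instead of golang.org/x/net/context. Since the x/net package has been a type alias for the stdlib one from Go 1.9 onward, existing callers keep compiling either way; a minimal sketch of the plain stdlib pattern, reusing ServiceRemove from the hunk above with an illustrative service ID:

    package main

    import (
    	"context"
    	"log"
    	"time"

    	"github.com/docker/docker/client"
    )

    func main() {
    	cli, err := client.NewEnvClient()
    	if err != nil {
    		log.Fatal(err)
    	}

    	// A stdlib context (with timeout/cancellation) is passed straight through;
    	// no golang.org/x/net/context import is needed on the caller side either.
    	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    	defer cancel()

    	if err := cli.ServiceRemove(ctx, "service_id"); err != nil {
    		log.Fatal(err)
    	}
    }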
diff --git a/vendor/github.com/docker/docker/client/service_update_test.go b/vendor/github.com/docker/docker/client/service_update_test.go deleted file mode 100644 index 7e20c14b..00000000 --- a/vendor/github.com/docker/docker/client/service_update_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" -) - -func TestServiceUpdateError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.ServiceUpdate(context.Background(), "service_id", swarm.Version{}, swarm.ServiceSpec{}, types.ServiceUpdateOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestServiceUpdate(t *testing.T) { - expectedURL := "/services/service_id/update" - - updateCases := []struct { - swarmVersion swarm.Version - expectedVersion string - }{ - { - expectedVersion: "0", - }, - { - swarmVersion: swarm.Version{ - Index: 0, - }, - expectedVersion: "0", - }, - { - swarmVersion: swarm.Version{ - Index: 10, - }, - expectedVersion: "10", - }, - } - - for _, updateCase := range updateCases { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - version := req.URL.Query().Get("version") - if version != updateCase.expectedVersion { - return nil, fmt.Errorf("version not set in URL query properly, expected '%s', got %s", updateCase.expectedVersion, version) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("{}"))), - }, nil - }), - } - - _, err := client.ServiceUpdate(context.Background(), "service_id", updateCase.swarmVersion, swarm.ServiceSpec{}, types.ServiceUpdateOptions{}) - if err != nil { - t.Fatal(err) - } - } -} diff --git a/vendor/github.com/docker/docker/client/session.go b/vendor/github.com/docker/docker/client/session.go index b136538c..df199f3d 100644 --- a/vendor/github.com/docker/docker/client/session.go +++ b/vendor/github.com/docker/docker/client/session.go @@ -1,10 +1,9 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net" "net/http" - - "golang.org/x/net/context" ) // DialSession returns a connection that can be used communication with daemon @@ -15,5 +14,5 @@ func (cli *Client) DialSession(ctx context.Context, proto string, meta map[strin } req = cli.addHeaders(req, meta) - return cli.setupHijackConn(req, proto) + return cli.setupHijackConn(ctx, req, proto) } diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go index c02ae35c..0c50c01a 100644 --- a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go +++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go @@ -1,10 +1,10 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // SwarmGetUnlockKey retrieves the swarm's unlock key. 
diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key_test.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key_test.go deleted file mode 100644 index aff79440..00000000 --- a/vendor/github.com/docker/docker/client/swarm_get_unlock_key_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/internal/testutil" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "golang.org/x/net/context" -) - -func TestSwarmGetUnlockKeyError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.SwarmGetUnlockKey(context.Background()) - testutil.ErrorContains(t, err, "Error response from daemon: Server error") -} - -func TestSwarmGetUnlockKey(t *testing.T) { - expectedURL := "/swarm/unlockkey" - unlockKey := "SWMKEY-1-y6guTZNTwpQeTL5RhUfOsdBdXoQjiB2GADHSRJvbXeE" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "GET" { - return nil, fmt.Errorf("expected GET method, got %s", req.Method) - } - - key := types.SwarmUnlockKeyResponse{ - UnlockKey: unlockKey, - } - - b, err := json.Marshal(key) - if err != nil { - return nil, err - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - resp, err := client.SwarmGetUnlockKey(context.Background()) - assert.NilError(t, err) - assert.Check(t, is.Equal(unlockKey, resp.UnlockKey)) -} diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go index 29978363..742ca0f0 100644 --- a/vendor/github.com/docker/docker/client/swarm_init.go +++ b/vendor/github.com/docker/docker/client/swarm_init.go @@ -1,10 +1,10 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // SwarmInit initializes the swarm. 
diff --git a/vendor/github.com/docker/docker/client/swarm_init_test.go b/vendor/github.com/docker/docker/client/swarm_init_test.go deleted file mode 100644 index ed8028fd..00000000 --- a/vendor/github.com/docker/docker/client/swarm_init_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/swarm" -) - -func TestSwarmInitError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.SwarmInit(context.Background(), swarm.InitRequest{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestSwarmInit(t *testing.T) { - expectedURL := "/swarm/init" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(`"body"`))), - }, nil - }), - } - - resp, err := client.SwarmInit(context.Background(), swarm.InitRequest{ - ListenAddr: "0.0.0.0:2377", - }) - if err != nil { - t.Fatal(err) - } - if resp != "body" { - t.Fatalf("Expected 'body', got %s", resp) - } -} diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go index 02f42aa8..cfaabb25 100644 --- a/vendor/github.com/docker/docker/client/swarm_inspect.go +++ b/vendor/github.com/docker/docker/client/swarm_inspect.go @@ -1,10 +1,10 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // SwarmInspect inspects the swarm. 
diff --git a/vendor/github.com/docker/docker/client/swarm_inspect_test.go b/vendor/github.com/docker/docker/client/swarm_inspect_test.go deleted file mode 100644 index 20a992a2..00000000 --- a/vendor/github.com/docker/docker/client/swarm_inspect_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -func TestSwarmInspectError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.SwarmInspect(context.Background()) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestSwarmInspect(t *testing.T) { - expectedURL := "/swarm" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - content, err := json.Marshal(swarm.Swarm{ - ClusterInfo: swarm.ClusterInfo{ - ID: "swarm_id", - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - swarmInspect, err := client.SwarmInspect(context.Background()) - if err != nil { - t.Fatal(err) - } - if swarmInspect.ID != "swarm_id" { - t.Fatalf("expected `swarm_id`, got %s", swarmInspect.ID) - } -} diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go index 13f8d79d..a1cf0455 100644 --- a/vendor/github.com/docker/docker/client/swarm_join.go +++ b/vendor/github.com/docker/docker/client/swarm_join.go @@ -1,8 +1,9 @@ package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // SwarmJoin joins the swarm. 
diff --git a/vendor/github.com/docker/docker/client/swarm_join_test.go b/vendor/github.com/docker/docker/client/swarm_join_test.go deleted file mode 100644 index 384b2dd4..00000000 --- a/vendor/github.com/docker/docker/client/swarm_join_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/swarm" -) - -func TestSwarmJoinError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.SwarmJoin(context.Background(), swarm.JoinRequest{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestSwarmJoin(t *testing.T) { - expectedURL := "/swarm/join" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.SwarmJoin(context.Background(), swarm.JoinRequest{ - ListenAddr: "0.0.0.0:2377", - }) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go index d0d00cbb..90ca84b3 100644 --- a/vendor/github.com/docker/docker/client/swarm_leave.go +++ b/vendor/github.com/docker/docker/client/swarm_leave.go @@ -1,9 +1,8 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net/url" - - "golang.org/x/net/context" ) // SwarmLeave leaves the swarm. diff --git a/vendor/github.com/docker/docker/client/swarm_leave_test.go b/vendor/github.com/docker/docker/client/swarm_leave_test.go deleted file mode 100644 index 5fb30c6c..00000000 --- a/vendor/github.com/docker/docker/client/swarm_leave_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestSwarmLeaveError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.SwarmLeave(context.Background(), false) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestSwarmLeave(t *testing.T) { - expectedURL := "/swarm/leave" - - leaveCases := []struct { - force bool - expectedForce string - }{ - { - expectedForce: "", - }, - { - force: true, - expectedForce: "1", - }, - } - - for _, leaveCase := range leaveCases { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - force := req.URL.Query().Get("force") - if force != leaveCase.expectedForce { - return nil, fmt.Errorf("force not set in URL query properly. 
expected '%s', got %s", leaveCase.expectedForce, force) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.SwarmLeave(context.Background(), leaveCase.force) - if err != nil { - t.Fatal(err) - } - } -} diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go index 44a26bcc..d2412f7d 100644 --- a/vendor/github.com/docker/docker/client/swarm_unlock.go +++ b/vendor/github.com/docker/docker/client/swarm_unlock.go @@ -1,8 +1,9 @@ package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // SwarmUnlock unlocks locked swarm. diff --git a/vendor/github.com/docker/docker/client/swarm_unlock_test.go b/vendor/github.com/docker/docker/client/swarm_unlock_test.go deleted file mode 100644 index 82422112..00000000 --- a/vendor/github.com/docker/docker/client/swarm_unlock_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/swarm" -) - -func TestSwarmUnlockError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.SwarmUnlock(context.Background(), swarm.UnlockRequest{"SWMKEY-1-y6guTZNTwpQeTL5RhUfOsdBdXoQjiB2GADHSRJvbXeU"}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestSwarmUnlock(t *testing.T) { - expectedURL := "/swarm/unlock" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.SwarmUnlock(context.Background(), swarm.UnlockRequest{"SWMKEY-1-y6guTZNTwpQeTL5RhUfOsdBdXoQjiB2GADHSRJvbXeU"}) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go index df6f1b0d..56a5bea7 100644 --- a/vendor/github.com/docker/docker/client/swarm_update.go +++ b/vendor/github.com/docker/docker/client/swarm_update.go @@ -1,12 +1,12 @@ package client // import "github.com/docker/docker/client" import ( + "context" "fmt" "net/url" "strconv" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // SwarmUpdate updates the swarm. 
diff --git a/vendor/github.com/docker/docker/client/swarm_update_test.go b/vendor/github.com/docker/docker/client/swarm_update_test.go deleted file mode 100644 index f4ea525a..00000000 --- a/vendor/github.com/docker/docker/client/swarm_update_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/swarm" -) - -func TestSwarmUpdateError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.SwarmUpdate(context.Background(), swarm.Version{}, swarm.Spec{}, swarm.UpdateFlags{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestSwarmUpdate(t *testing.T) { - expectedURL := "/swarm/update" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.SwarmUpdate(context.Background(), swarm.Version{}, swarm.Spec{}, swarm.UpdateFlags{}) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go index 13e0a85c..e1c0a736 100644 --- a/vendor/github.com/docker/docker/client/task_inspect.go +++ b/vendor/github.com/docker/docker/client/task_inspect.go @@ -2,11 +2,11 @@ package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // TaskInspectWithRaw returns the task information and its raw representation.. 
diff --git a/vendor/github.com/docker/docker/client/task_inspect_test.go b/vendor/github.com/docker/docker/client/task_inspect_test.go deleted file mode 100644 index 6ca98dde..00000000 --- a/vendor/github.com/docker/docker/client/task_inspect_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types/swarm" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -func TestTaskInspectError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, _, err := client.TaskInspectWithRaw(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestTaskInspectWithEmptyID(t *testing.T) { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - return nil, errors.New("should not make request") - }), - } - _, _, err := client.TaskInspectWithRaw(context.Background(), "") - if !IsErrNotFound(err) { - t.Fatalf("Expected NotFoundError, got %v", err) - } -} - -func TestTaskInspect(t *testing.T) { - expectedURL := "/tasks/task_id" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - content, err := json.Marshal(swarm.Task{ - ID: "task_id", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - taskInspect, _, err := client.TaskInspectWithRaw(context.Background(), "task_id") - if err != nil { - t.Fatal(err) - } - if taskInspect.ID != "task_id" { - t.Fatalf("expected `task_id`, got %s", taskInspect.ID) - } -} diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go index e5671801..42d20c1b 100644 --- a/vendor/github.com/docker/docker/client/task_list.go +++ b/vendor/github.com/docker/docker/client/task_list.go @@ -1,13 +1,13 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // TaskList returns the list of tasks. 
diff --git a/vendor/github.com/docker/docker/client/task_list_test.go b/vendor/github.com/docker/docker/client/task_list_test.go deleted file mode 100644 index 900afb66..00000000 --- a/vendor/github.com/docker/docker/client/task_list_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -func TestTaskListError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.TaskList(context.Background(), types.TaskListOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestTaskList(t *testing.T) { - expectedURL := "/tasks" - - filters := filters.NewArgs() - filters.Add("label", "label1") - filters.Add("label", "label2") - - listCases := []struct { - options types.TaskListOptions - expectedQueryParams map[string]string - }{ - { - options: types.TaskListOptions{}, - expectedQueryParams: map[string]string{ - "filters": "", - }, - }, - { - options: types.TaskListOptions{ - Filters: filters, - }, - expectedQueryParams: map[string]string{ - "filters": `{"label":{"label1":true,"label2":true}}`, - }, - }, - } - for _, listCase := range listCases { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - for key, expected := range listCase.expectedQueryParams { - actual := query.Get(key) - if actual != expected { - return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) - } - } - content, err := json.Marshal([]swarm.Task{ - { - ID: "task_id1", - }, - { - ID: "task_id2", - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - tasks, err := client.TaskList(context.Background(), listCase.options) - if err != nil { - t.Fatal(err) - } - if len(tasks) != 2 { - t.Fatalf("expected 2 tasks, got %v", tasks) - } - } -} diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go index cf0b3f6b..6222fab5 100644 --- a/vendor/github.com/docker/docker/client/task_logs.go +++ b/vendor/github.com/docker/docker/client/task_logs.go @@ -1,12 +1,11 @@ package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" "time" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" timetypes "github.com/docker/docker/api/types/time" ) diff --git a/vendor/github.com/docker/docker/client/tlsconfig_clone.go b/vendor/github.com/docker/docker/client/tlsconfig_clone.go deleted file mode 100644 index 88200e92..00000000 --- a/vendor/github.com/docker/docker/client/tlsconfig_clone.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.8 - -package client // import "github.com/docker/docker/client" - -import "crypto/tls" - -// tlsConfigClone returns a clone of tls.Config. This function is provided for -// compatibility for go1.7 that doesn't include this method in stdlib. 
-func tlsConfigClone(c *tls.Config) *tls.Config { - return c.Clone() -} diff --git a/vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go b/vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go deleted file mode 100644 index e2985423..00000000 --- a/vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build go1.7,!go1.8 - -package client // import "github.com/docker/docker/client" - -import "crypto/tls" - -// tlsConfigClone returns a clone of tls.Config. This function is provided for -// compatibility for go1.7 that doesn't include this method in stdlib. -func tlsConfigClone(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, - Renegotiation: c.Renegotiation, - } -} diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go index 4bb60fab..1989f6d6 100644 --- a/vendor/github.com/docker/docker/client/version.go +++ b/vendor/github.com/docker/docker/client/version.go @@ -1,10 +1,10 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ServerVersion returns information of the docker client and server host. diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go index 8108b7b9..f1f6fcdc 100644 --- a/vendor/github.com/docker/docker/client/volume_create.go +++ b/vendor/github.com/docker/docker/client/volume_create.go @@ -1,15 +1,15 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" volumetypes "github.com/docker/docker/api/types/volume" - "golang.org/x/net/context" ) // VolumeCreate creates a volume in the docker host. 
-func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) { +func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) { var volume types.Volume resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) if err != nil { diff --git a/vendor/github.com/docker/docker/client/volume_create_test.go b/vendor/github.com/docker/docker/client/volume_create_test.go deleted file mode 100644 index 69ed24a5..00000000 --- a/vendor/github.com/docker/docker/client/volume_create_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - volumetypes "github.com/docker/docker/api/types/volume" - "golang.org/x/net/context" -) - -func TestVolumeCreateError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestVolumeCreate(t *testing.T) { - expectedURL := "/volumes/create" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - - content, err := json.Marshal(types.Volume{ - Name: "volume", - Driver: "local", - Mountpoint: "mountpoint", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - volume, err := client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{ - Name: "myvolume", - Driver: "mydriver", - DriverOpts: map[string]string{ - "opt-key": "opt-value", - }, - }) - if err != nil { - t.Fatal(err) - } - if volume.Name != "volume" { - t.Fatalf("expected volume.Name to be 'volume', got %s", volume.Name) - } - if volume.Driver != "local" { - t.Fatalf("expected volume.Driver to be 'local', got %s", volume.Driver) - } - if volume.Mountpoint != "mountpoint" { - t.Fatalf("expected volume.Mountpoint to be 'mountpoint', got %s", volume.Mountpoint) - } -} diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go index eb90f66f..f840682d 100644 --- a/vendor/github.com/docker/docker/client/volume_inspect.go +++ b/vendor/github.com/docker/docker/client/volume_inspect.go @@ -2,11 +2,11 @@ package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // VolumeInspect returns the information about a specific volume in the docker host. 
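Editor's note: the volume_create.go hunk renames the request body type from volumetypes.VolumesCreateBody to volumetypes.VolumeCreateBody, so downstream callers need a one-line type update. A minimal sketch of an updated call, assuming the field names (Name, Driver, DriverOpts) carry over unchanged from the old body type:

    package main

    import (
    	"context"
    	"log"

    	volumetypes "github.com/docker/docker/api/types/volume"
    	"github.com/docker/docker/client"
    )

    func main() {
    	cli, err := client.NewEnvClient()
    	if err != nil {
    		log.Fatal(err)
    	}

    	// VolumeCreateBody replaces the old VolumesCreateBody; the payload is the
    	// same volume-creation request as before.
    	vol, err := cli.VolumeCreate(context.Background(), volumetypes.VolumeCreateBody{
    		Name:   "myvolume", // illustrative values
    		Driver: "local",
    		DriverOpts: map[string]string{
    			"opt-key": "opt-value",
    		},
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("created %s at %s", vol.Name, vol.Mountpoint)
    }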
diff --git a/vendor/github.com/docker/docker/client/volume_inspect_test.go b/vendor/github.com/docker/docker/client/volume_inspect_test.go deleted file mode 100644 index 4a2cf7c7..00000000 --- a/vendor/github.com/docker/docker/client/volume_inspect_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/internal/testutil" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -func TestVolumeInspectError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.VolumeInspect(context.Background(), "nothing") - testutil.ErrorContains(t, err, "Error response from daemon: Server error") -} - -func TestVolumeInspectNotFound(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "Server error")), - } - - _, err := client.VolumeInspect(context.Background(), "unknown") - assert.Check(t, IsErrNotFound(err)) -} - -func TestVolumeInspectWithEmptyID(t *testing.T) { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - return nil, errors.New("should not make request") - }), - } - _, _, err := client.VolumeInspectWithRaw(context.Background(), "") - if !IsErrNotFound(err) { - t.Fatalf("Expected NotFoundError, got %v", err) - } -} - -func TestVolumeInspect(t *testing.T) { - expectedURL := "/volumes/volume_id" - expected := types.Volume{ - Name: "name", - Driver: "driver", - Mountpoint: "mountpoint", - } - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "GET" { - return nil, fmt.Errorf("expected GET method, got %s", req.Method) - } - content, err := json.Marshal(expected) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - volume, err := client.VolumeInspect(context.Background(), "volume_id") - assert.NilError(t, err) - assert.Check(t, is.DeepEqual(expected, volume)) -} diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go index aeca1cc1..284554d6 100644 --- a/vendor/github.com/docker/docker/client/volume_list.go +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -1,17 +1,17 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types/filters" volumetypes "github.com/docker/docker/api/types/volume" - "golang.org/x/net/context" ) // VolumeList returns the volumes configured in the docker host. 
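Editor's note: the volume_list.go hunk continuing below makes the matching rename on the response side (VolumesListOKBody becomes VolumeListOKBody). A short sketch of a caller on the new name, with an illustrative dangling filter; the Volumes field is assumed to keep its []*types.Volume shape from the old body:

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	"github.com/docker/docker/api/types/filters"
    	"github.com/docker/docker/client"
    )

    func main() {
    	cli, err := client.NewEnvClient()
    	if err != nil {
    		log.Fatal(err)
    	}

    	// VolumeList now returns volumetypes.VolumeListOKBody.
    	args := filters.NewArgs()
    	args.Add("dangling", "true") // illustrative filter

    	resp, err := cli.VolumeList(context.Background(), args)
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, v := range resp.Volumes {
    		fmt.Println(v.Name, v.Driver)
    	}
    }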
-func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) { - var volumes volumetypes.VolumesListOKBody +func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) { + var volumes volumetypes.VolumeListOKBody query := url.Values{} if filter.Len() > 0 { diff --git a/vendor/github.com/docker/docker/client/volume_list_test.go b/vendor/github.com/docker/docker/client/volume_list_test.go deleted file mode 100644 index b7f94641..00000000 --- a/vendor/github.com/docker/docker/client/volume_list_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - volumetypes "github.com/docker/docker/api/types/volume" - "golang.org/x/net/context" -) - -func TestVolumeListError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.VolumeList(context.Background(), filters.NewArgs()) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestVolumeList(t *testing.T) { - expectedURL := "/volumes" - - noDanglingFilters := filters.NewArgs() - noDanglingFilters.Add("dangling", "false") - - danglingFilters := filters.NewArgs() - danglingFilters.Add("dangling", "true") - - labelFilters := filters.NewArgs() - labelFilters.Add("label", "label1") - labelFilters.Add("label", "label2") - - listCases := []struct { - filters filters.Args - expectedFilters string - }{ - { - filters: filters.NewArgs(), - expectedFilters: "", - }, { - filters: noDanglingFilters, - expectedFilters: `{"dangling":{"false":true}}`, - }, { - filters: danglingFilters, - expectedFilters: `{"dangling":{"true":true}}`, - }, { - filters: labelFilters, - expectedFilters: `{"label":{"label1":true,"label2":true}}`, - }, - } - - for _, listCase := range listCases { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - actualFilters := query.Get("filters") - if actualFilters != listCase.expectedFilters { - return nil, fmt.Errorf("filters not set in URL query properly. 
Expected '%s', got %s", listCase.expectedFilters, actualFilters) - } - content, err := json.Marshal(volumetypes.VolumesListOKBody{ - Volumes: []*types.Volume{ - { - Name: "volume", - Driver: "local", - }, - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - volumeResponse, err := client.VolumeList(context.Background(), listCase.filters) - if err != nil { - t.Fatal(err) - } - if len(volumeResponse.Volumes) != 1 { - t.Fatalf("expected 1 volume, got %v", volumeResponse.Volumes) - } - } -} diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go index 090a6a66..70041efe 100644 --- a/vendor/github.com/docker/docker/client/volume_prune.go +++ b/vendor/github.com/docker/docker/client/volume_prune.go @@ -1,12 +1,12 @@ package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // VolumesPrune requests the daemon to delete unused data diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go index 02ee573a..fc5a71d3 100644 --- a/vendor/github.com/docker/docker/client/volume_remove.go +++ b/vendor/github.com/docker/docker/client/volume_remove.go @@ -1,10 +1,10 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types/versions" - "golang.org/x/net/context" ) // VolumeRemove removes a volume from the docker host. diff --git a/vendor/github.com/docker/docker/client/volume_remove_test.go b/vendor/github.com/docker/docker/client/volume_remove_test.go deleted file mode 100644 index 5364f70b..00000000 --- a/vendor/github.com/docker/docker/client/volume_remove_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestVolumeRemoveError(t *testing.T) { - client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.VolumeRemove(context.Background(), "volume_id", false) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestVolumeRemove(t *testing.T) { - expectedURL := "/volumes/volume_id" - - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "DELETE" { - return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), - }, nil - }), - } - - err := client.VolumeRemove(context.Background(), "volume_id", false) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/codecov.yml b/vendor/github.com/docker/docker/codecov.yml deleted file mode 100644 index 594265c6..00000000 --- a/vendor/github.com/docker/docker/codecov.yml +++ /dev/null @@ -1,17 +0,0 @@ -comment: - layout: header, changes, diff, sunburst -coverage: - status: - patch: - default: - target: 50% - only_pulls: 
true - # project will give us the diff in the total code coverage between a commit - # and its parent - project: - default: - target: auto - threshold: "15%" - changes: false -ignore: - - "vendor/*" diff --git a/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE b/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE new file mode 100644 index 00000000..e67cdabd --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 Honza Pokorny +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/docker/docs/static_files/contributors.png b/vendor/github.com/docker/docker/docs/static_files/contributors.png new file mode 100644 index 00000000..63c0a0c0 Binary files /dev/null and b/vendor/github.com/docker/docker/docs/static_files/contributors.png differ diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go new file mode 100644 index 00000000..e6a2275b --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/defs.go @@ -0,0 +1,74 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +// ErrNotFound signals that the requested object doesn't exist +type ErrNotFound interface { + NotFound() +} + +// ErrInvalidParameter signals that the user input is invalid +type ErrInvalidParameter interface { + InvalidParameter() +} + +// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. +// A change in state should be able to clear this error. +type ErrConflict interface { + Conflict() +} + +// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action +type ErrUnauthorized interface { + Unauthorized() +} + +// ErrUnavailable signals that the requested action/subsystem is not available. +type ErrUnavailable interface { + Unavailable() +} + +// ErrForbidden signals that the requested action cannot be performed under any circumstances. +// When a ErrForbidden is returned, the caller should never retry the action. +type ErrForbidden interface { + Forbidden() +} + +// ErrSystem signals that some internal error occurred. +// An example of this would be a failed mount request. 
+type ErrSystem interface { + System() +} + +// ErrNotModified signals that an action can't be performed because it's already in the desired state +type ErrNotModified interface { + NotModified() +} + +// ErrAlreadyExists is a special case of ErrConflict which signals that the desired object already exists +type ErrAlreadyExists interface { + AlreadyExists() +} + +// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. +type ErrNotImplemented interface { + NotImplemented() +} + +// ErrUnknown signals that the kind of error that occurred is not known. +type ErrUnknown interface { + Unknown() +} + +// ErrCancelled signals that the action was cancelled. +type ErrCancelled interface { + Cancelled() +} + +// ErrDeadline signals that the deadline was reached before the action completed. +type ErrDeadline interface { + DeadlineExceeded() +} + +// ErrDataLoss indicates that data was lost or there is data corruption. +type ErrDataLoss interface { + DataLoss() +} diff --git a/vendor/github.com/docker/docker/errdefs/doc.go b/vendor/github.com/docker/docker/errdefs/doc.go new file mode 100644 index 00000000..c211f174 --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/doc.go @@ -0,0 +1,8 @@ +// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. +// Errors that cross the package boundary should implement one (and only one) of these interfaces. +// +// Packages should not reference these interfaces directly, only implement them. +// To check if a particular error implements one of these interfaces, there are helper +// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. +// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`). 
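Editor's note: the new errdefs package (defs.go above, doc.go here) asks callers to classify errors through helper functions rather than asserting the interfaces directly, so that wrapped causes are still recognized. A minimal sketch of that intended check, using the constructor and Is helper added later in this diff; findWidget is a hypothetical lookup:

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/errdefs"
    	"github.com/pkg/errors"
    )

    // findWidget is a hypothetical lookup that classifies its failure as "not found".
    func findWidget(id string) error {
    	return errdefs.NotFound(fmt.Errorf("no widget with id %q", id))
    }

    func main() {
    	// pkg/errors wrapping adds a Cause() link, which the errdefs helpers follow.
    	err := errors.Wrap(findWidget("w1"), "loading dashboard")

    	// Prefer the helper over asserting errdefs.ErrNotFound on err directly:
    	// the helper walks the causal chain, a bare type assertion would not.
    	if errdefs.IsNotFound(err) {
    		fmt.Println("treat as not found:", err)
    	}
    }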
+package errdefs // import "github.com/docker/docker/errdefs" diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go new file mode 100644 index 00000000..6169c2bc --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/helpers.go @@ -0,0 +1,240 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import "context" + +type errNotFound struct{ error } + +func (errNotFound) NotFound() {} + +func (e errNotFound) Cause() error { + return e.error +} + +// NotFound is a helper to create an error of the class with the same name from any error type +func NotFound(err error) error { + if err == nil { + return nil + } + return errNotFound{err} +} + +type errInvalidParameter struct{ error } + +func (errInvalidParameter) InvalidParameter() {} + +func (e errInvalidParameter) Cause() error { + return e.error +} + +// InvalidParameter is a helper to create an error of the class with the same name from any error type +func InvalidParameter(err error) error { + if err == nil { + return nil + } + return errInvalidParameter{err} +} + +type errConflict struct{ error } + +func (errConflict) Conflict() {} + +func (e errConflict) Cause() error { + return e.error +} + +// Conflict is a helper to create an error of the class with the same name from any error type +func Conflict(err error) error { + if err == nil { + return nil + } + return errConflict{err} +} + +type errUnauthorized struct{ error } + +func (errUnauthorized) Unauthorized() {} + +func (e errUnauthorized) Cause() error { + return e.error +} + +// Unauthorized is a helper to create an error of the class with the same name from any error type +func Unauthorized(err error) error { + if err == nil { + return nil + } + return errUnauthorized{err} +} + +type errUnavailable struct{ error } + +func (errUnavailable) Unavailable() {} + +func (e errUnavailable) Cause() error { + return e.error +} + +// Unavailable is a helper to create an error of the class with the same name from any error type +func Unavailable(err error) error { + return errUnavailable{err} +} + +type errForbidden struct{ error } + +func (errForbidden) Forbidden() {} + +func (e errForbidden) Cause() error { + return e.error +} + +// Forbidden is a helper to create an error of the class with the same name from any error type +func Forbidden(err error) error { + if err == nil { + return nil + } + return errForbidden{err} +} + +type errSystem struct{ error } + +func (errSystem) System() {} + +func (e errSystem) Cause() error { + return e.error +} + +// System is a helper to create an error of the class with the same name from any error type +func System(err error) error { + if err == nil { + return nil + } + return errSystem{err} +} + +type errNotModified struct{ error } + +func (errNotModified) NotModified() {} + +func (e errNotModified) Cause() error { + return e.error +} + +// NotModified is a helper to create an error of the class with the same name from any error type +func NotModified(err error) error { + if err == nil { + return nil + } + return errNotModified{err} +} + +type errAlreadyExists struct{ error } + +func (errAlreadyExists) AlreadyExists() {} + +func (e errAlreadyExists) Cause() error { + return e.error +} + +// AlreadyExists is a helper to create an error of the class with the same name from any error type +func AlreadyExists(err error) error { + if err == nil { + return nil + } + return errAlreadyExists{err} +} + +type errNotImplemented struct{ error } + +func (errNotImplemented) NotImplemented() 
{} + +func (e errNotImplemented) Cause() error { + return e.error +} + +// NotImplemented is a helper to create an error of the class with the same name from any error type +func NotImplemented(err error) error { + if err == nil { + return nil + } + return errNotImplemented{err} +} + +type errUnknown struct{ error } + +func (errUnknown) Unknown() {} + +func (e errUnknown) Cause() error { + return e.error +} + +// Unknown is a helper to create an error of the class with the same name from any error type +func Unknown(err error) error { + if err == nil { + return nil + } + return errUnknown{err} +} + +type errCancelled struct{ error } + +func (errCancelled) Cancelled() {} + +func (e errCancelled) Cause() error { + return e.error +} + +// Cancelled is a helper to create an error of the class with the same name from any error type +func Cancelled(err error) error { + if err == nil { + return nil + } + return errCancelled{err} +} + +type errDeadline struct{ error } + +func (errDeadline) DeadlineExceeded() {} + +func (e errDeadline) Cause() error { + return e.error +} + +// Deadline is a helper to create an error of the class with the same name from any error type +func Deadline(err error) error { + if err == nil { + return nil + } + return errDeadline{err} +} + +type errDataLoss struct{ error } + +func (errDataLoss) DataLoss() {} + +func (e errDataLoss) Cause() error { + return e.error +} + +// DataLoss is a helper to create an error of the class with the same name from any error type +func DataLoss(err error) error { + if err == nil { + return nil + } + return errDataLoss{err} +} + +// FromContext returns the error class from the passed in context +func FromContext(ctx context.Context) error { + e := ctx.Err() + if e == nil { + return nil + } + + if e == context.Canceled { + return Cancelled(e) + } + if e == context.DeadlineExceeded { + return Deadline(e) + } + return Unknown(e) +} diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go new file mode 100644 index 00000000..e0513331 --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/is.go @@ -0,0 +1,114 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +type causer interface { + Cause() error +} + +func getImplementer(err error) error { + switch e := err.(type) { + case + ErrNotFound, + ErrInvalidParameter, + ErrConflict, + ErrUnauthorized, + ErrUnavailable, + ErrForbidden, + ErrSystem, + ErrNotModified, + ErrAlreadyExists, + ErrNotImplemented, + ErrCancelled, + ErrDeadline, + ErrDataLoss, + ErrUnknown: + return err + case causer: + return getImplementer(e.Cause()) + default: + return err + } +} + +// IsNotFound returns if the passed in error is an ErrNotFound +func IsNotFound(err error) bool { + _, ok := getImplementer(err).(ErrNotFound) + return ok +} + +// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter +func IsInvalidParameter(err error) bool { + _, ok := getImplementer(err).(ErrInvalidParameter) + return ok +} + +// IsConflict returns if the passed in error is an ErrConflict +func IsConflict(err error) bool { + _, ok := getImplementer(err).(ErrConflict) + return ok +} + +// IsUnauthorized returns if the passed in error is an ErrUnauthorized +func IsUnauthorized(err error) bool { + _, ok := getImplementer(err).(ErrUnauthorized) + return ok +} + +// IsUnavailable returns if the passed in error is an ErrUnavailable +func IsUnavailable(err error) bool { + _, ok := getImplementer(err).(ErrUnavailable) + return ok +} + +// IsForbidden returns 
if the passed in error is an ErrForbidden +func IsForbidden(err error) bool { + _, ok := getImplementer(err).(ErrForbidden) + return ok +} + +// IsSystem returns if the passed in error is an ErrSystem +func IsSystem(err error) bool { + _, ok := getImplementer(err).(ErrSystem) + return ok +} + +// IsNotModified returns if the passed in error is a NotModified error +func IsNotModified(err error) bool { + _, ok := getImplementer(err).(ErrNotModified) + return ok +} + +// IsAlreadyExists returns if the passed in error is a AlreadyExists error +func IsAlreadyExists(err error) bool { + _, ok := getImplementer(err).(ErrAlreadyExists) + return ok +} + +// IsNotImplemented returns if the passed in error is an ErrNotImplemented +func IsNotImplemented(err error) bool { + _, ok := getImplementer(err).(ErrNotImplemented) + return ok +} + +// IsUnknown returns if the passed in error is an ErrUnknown +func IsUnknown(err error) bool { + _, ok := getImplementer(err).(ErrUnknown) + return ok +} + +// IsCancelled returns if the passed in error is an ErrCancelled +func IsCancelled(err error) bool { + _, ok := getImplementer(err).(ErrCancelled) + return ok +} + +// IsDeadline returns if the passed in error is an ErrDeadline +func IsDeadline(err error) bool { + _, ok := getImplementer(err).(ErrDeadline) + return ok +} + +// IsDataLoss returns if the passed in error is an ErrDataLoss +func IsDataLoss(err error) bool { + _, ok := getImplementer(err).(ErrDataLoss) + return ok +} diff --git a/vendor/github.com/docker/docker/hack/generate-authors.sh b/vendor/github.com/docker/docker/hack/generate-authors.sh new file mode 100755 index 00000000..680bdb7b --- /dev/null +++ b/vendor/github.com/docker/docker/hack/generate-authors.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.." + +# see also ".mailmap" for how email addresses and names are deduplicated + +{ + cat <<-'EOH' + # This file lists all individuals having contributed content to the repository. + # For how it is generated, see `hack/generate-authors.sh`. 
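Editor's note: helpers.go and is.go pair each interface with a constructor and a checker, and FromContext maps context cancellation and deadline errors onto the same classes. A small sketch of that mapping, using only what the errdefs hunks above define:

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"github.com/docker/docker/errdefs"
    )

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
    	defer cancel()
    	<-ctx.Done() // let the deadline expire

    	// FromContext turns context.DeadlineExceeded into an errdefs "deadline" error,
    	// so callers branch on the error class instead of the sentinel value.
    	err := errdefs.FromContext(ctx)
    	fmt.Println(errdefs.IsDeadline(err))  // true
    	fmt.Println(errdefs.IsCancelled(err)) // false
    }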
+ EOH + echo + git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf +} > AUTHORS diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem new file mode 120000 index 00000000..70a3e6ce --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem @@ -0,0 +1 @@ +../../../integration/testdata/https/ca.pem \ No newline at end of file diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem new file mode 120000 index 00000000..45888202 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem @@ -0,0 +1 @@ +../../../integration/testdata/https/client-cert.pem \ No newline at end of file diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem new file mode 120000 index 00000000..d5f6bbee --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem @@ -0,0 +1 @@ +../../../integration/testdata/https/client-key.pem \ No newline at end of file diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem new file mode 120000 index 00000000..c1860106 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem @@ -0,0 +1 @@ +../../../integration/testdata/https/server-cert.pem \ No newline at end of file diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem new file mode 120000 index 00000000..48b9c2df --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem @@ -0,0 +1 @@ +../../../integration/testdata/https/server-key.pem \ No newline at end of file diff --git a/vendor/github.com/docker/docker/opts/BUILD.bazel b/vendor/github.com/docker/docker/opts/BUILD.bazel deleted file mode 100644 index 2a7f8108..00000000 --- a/vendor/github.com/docker/docker/opts/BUILD.bazel +++ /dev/null @@ -1,86 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "env.go", - "hosts.go", - "ip.go", - "opts.go", - "quotedstring.go", - "runtime.go", - "ulimit.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "hosts_unix.go", - "opts_unix.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "hosts_unix.go", - "opts_unix.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "hosts_unix.go", - "opts_unix.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "hosts_unix.go", - "opts_unix.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "hosts_unix.go", - "opts_unix.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "hosts_unix.go", - "opts_unix.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "hosts_unix.go", - "opts_unix.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "hosts_unix.go", - "opts_unix.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "hosts_unix.go", - "opts_unix.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "hosts_unix.go", - "opts_unix.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "hosts_windows.go", - 
"opts_windows.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/docker/docker/opts", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/docker/docker/api/types:go_default_library", - "//vendor/github.com/docker/go-units:go_default_library", - "//vendor/github.com/pkg/errors:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "env_test.go", - "hosts_test.go", - "ip_test.go", - "opts_test.go", - "quotedstring_test.go", - "ulimit_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/docker/docker/opts", - deps = [ - "//vendor/github.com/docker/go-units:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert/cmp:go_default_library", - ], -) diff --git a/vendor/github.com/docker/docker/opts/address_pools.go b/vendor/github.com/docker/docker/opts/address_pools.go new file mode 100644 index 00000000..9b27a628 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/address_pools.go @@ -0,0 +1,84 @@ +package opts + +import ( + "encoding/csv" + "encoding/json" + "fmt" + "strconv" + "strings" + + types "github.com/docker/libnetwork/ipamutils" +) + +// PoolsOpt is a Value type for parsing the default address pools definitions +type PoolsOpt struct { + values []*types.NetworkToSplit +} + +// UnmarshalJSON fills values structure info from JSON input +func (p *PoolsOpt) UnmarshalJSON(raw []byte) error { + return json.Unmarshal(raw, &(p.values)) +} + +// Set predefined pools +func (p *PoolsOpt) Set(value string) error { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + poolsDef := types.NetworkToSplit{} + + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid field '%s' must be a key=value pair", field) + } + + key := strings.ToLower(parts[0]) + value := strings.ToLower(parts[1]) + + switch key { + case "base": + poolsDef.Base = value + case "size": + size, err := strconv.Atoi(value) + if err != nil { + return fmt.Errorf("invalid size value: %q (must be integer): %v", value, err) + } + poolsDef.Size = size + default: + return fmt.Errorf("unexpected key '%s' in '%s'", key, field) + } + } + + p.values = append(p.values, &poolsDef) + + return nil +} + +// Type returns the type of this option +func (p *PoolsOpt) Type() string { + return "pool-options" +} + +// String returns a string repr of this option +func (p *PoolsOpt) String() string { + var pools []string + for _, pool := range p.values { + repr := fmt.Sprintf("%s %d", pool.Base, pool.Size) + pools = append(pools, repr) + } + return strings.Join(pools, ", ") +} + +// Value returns the mounts +func (p *PoolsOpt) Value() []*types.NetworkToSplit { + return p.values +} + +// Name returns the flag name of this option +func (p *PoolsOpt) Name() string { + return "default-address-pools" +} diff --git a/vendor/github.com/docker/docker/opts/env_test.go b/vendor/github.com/docker/docker/opts/env_test.go deleted file mode 100644 index 1ecf1e2b..00000000 --- a/vendor/github.com/docker/docker/opts/env_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -import ( - "fmt" - "os" - "runtime" - "testing" -) - -func TestValidateEnv(t *testing.T) { - testcase := []struct { - value string - expected string - err error - }{ - { - value: "a", - expected: "a", - }, - { - value: 
"something", - expected: "something", - }, - { - value: "_=a", - expected: "_=a", - }, - { - value: "env1=value1", - expected: "env1=value1", - }, - { - value: "_env1=value1", - expected: "_env1=value1", - }, - { - value: "env2=value2=value3", - expected: "env2=value2=value3", - }, - { - value: "env3=abc!qwe", - expected: "env3=abc!qwe", - }, - { - value: "env_4=value 4", - expected: "env_4=value 4", - }, - { - value: "PATH", - expected: fmt.Sprintf("PATH=%v", os.Getenv("PATH")), - }, - { - value: "=a", - err: fmt.Errorf(fmt.Sprintf("invalid environment variable: %s", "=a")), - }, - { - value: "PATH=something", - expected: "PATH=something", - }, - { - value: "asd!qwe", - expected: "asd!qwe", - }, - { - value: "1asd", - expected: "1asd", - }, - { - value: "123", - expected: "123", - }, - { - value: "some space", - expected: "some space", - }, - { - value: " some space before", - expected: " some space before", - }, - { - value: "some space after ", - expected: "some space after ", - }, - { - value: "=", - err: fmt.Errorf(fmt.Sprintf("invalid environment variable: %s", "=")), - }, - } - - // Environment variables are case in-sensitive on Windows - if runtime.GOOS == "windows" { - tmp := struct { - value string - expected string - err error - }{ - value: "PaTh", - expected: fmt.Sprintf("PaTh=%v", os.Getenv("PATH")), - } - testcase = append(testcase, tmp) - - } - - for _, r := range testcase { - actual, err := ValidateEnv(r.value) - - if err != nil { - if r.err == nil { - t.Fatalf("Expected err is nil, got err[%v]", err) - } - if err.Error() != r.err.Error() { - t.Fatalf("Expected err[%v], got err[%v]", r.err, err) - } - } - - if err == nil && r.err != nil { - t.Fatalf("Expected err[%v], but err is nil", r.err) - } - - if actual != r.expected { - t.Fatalf("Expected [%v], got [%v]", r.expected, actual) - } - } -} diff --git a/vendor/github.com/docker/docker/opts/hosts_test.go b/vendor/github.com/docker/docker/opts/hosts_test.go deleted file mode 100644 index cd8c3f91..00000000 --- a/vendor/github.com/docker/docker/opts/hosts_test.go +++ /dev/null @@ -1,181 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -import ( - "fmt" - "strings" - "testing" -) - -func TestParseHost(t *testing.T) { - invalid := []string{ - "something with spaces", - "://", - "unknown://", - "tcp://:port", - "tcp://invalid:port", - } - - valid := map[string]string{ - "": DefaultHost, - " ": DefaultHost, - " ": DefaultHost, - "fd://": "fd://", - "fd://something": "fd://something", - "tcp://host:": fmt.Sprintf("tcp://host:%d", DefaultHTTPPort), - "tcp://": DefaultTCPHost, - "tcp://:2375": fmt.Sprintf("tcp://%s:2375", DefaultHTTPHost), - "tcp://:2376": fmt.Sprintf("tcp://%s:2376", DefaultHTTPHost), - "tcp://0.0.0.0:8080": "tcp://0.0.0.0:8080", - "tcp://192.168.0.0:12000": "tcp://192.168.0.0:12000", - "tcp://192.168:8080": "tcp://192.168:8080", - "tcp://0.0.0.0:1234567890": "tcp://0.0.0.0:1234567890", // yeah it's valid :P - " tcp://:7777/path ": fmt.Sprintf("tcp://%s:7777/path", DefaultHTTPHost), - "tcp://docker.com:2375": "tcp://docker.com:2375", - "unix://": "unix://" + DefaultUnixSocket, - "unix://path/to/socket": "unix://path/to/socket", - "npipe://": "npipe://" + DefaultNamedPipe, - "npipe:////./pipe/foo": "npipe:////./pipe/foo", - } - - for _, value := range invalid { - if _, err := ParseHost(false, value); err == nil { - t.Errorf("Expected an error for %v, got [nil]", value) - } - } - - for value, expected := range valid { - if actual, err := ParseHost(false, value); err != nil || actual != expected { - 
t.Errorf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err) - } - } -} - -func TestParseDockerDaemonHost(t *testing.T) { - invalids := map[string]string{ - - "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", - "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", - "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", - "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375", - "tcp://unix:///run/docker.sock": "Invalid proto, expected tcp: unix:///run/docker.sock", - " tcp://:7777/path ": "Invalid bind address format: tcp://:7777/path ", - "": "Invalid bind address format: ", - } - valids := map[string]string{ - "0.0.0.1:": "tcp://0.0.0.1:2375", - "0.0.0.1:5555": "tcp://0.0.0.1:5555", - "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", - "[::1]:": "tcp://[::1]:2375", - "[::1]:5555/path": "tcp://[::1]:5555/path", - "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2375", - "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", - ":6666": fmt.Sprintf("tcp://%s:6666", DefaultHTTPHost), - ":6666/path": fmt.Sprintf("tcp://%s:6666/path", DefaultHTTPHost), - "tcp://": DefaultTCPHost, - "tcp://:7777": fmt.Sprintf("tcp://%s:7777", DefaultHTTPHost), - "tcp://:7777/path": fmt.Sprintf("tcp://%s:7777/path", DefaultHTTPHost), - "unix:///run/docker.sock": "unix:///run/docker.sock", - "unix://": "unix://" + DefaultUnixSocket, - "fd://": "fd://", - "fd://something": "fd://something", - "localhost:": "tcp://localhost:2375", - "localhost:5555": "tcp://localhost:5555", - "localhost:5555/path": "tcp://localhost:5555/path", - } - for invalidAddr, expectedError := range invalids { - if addr, err := parseDaemonHost(invalidAddr); err == nil || err.Error() != expectedError { - t.Errorf("tcp %v address expected error %q return, got %q and addr %v", invalidAddr, expectedError, err, addr) - } - } - for validAddr, expectedAddr := range valids { - if addr, err := parseDaemonHost(validAddr); err != nil || addr != expectedAddr { - t.Errorf("%v -> expected %v, got (%v) addr (%v)", validAddr, expectedAddr, err, addr) - } - } -} - -func TestParseTCP(t *testing.T) { - var ( - defaultHTTPHost = "tcp://127.0.0.1:2376" - ) - invalids := map[string]string{ - "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", - "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", - "udp://127.0.0.1": "Invalid proto, expected tcp: udp://127.0.0.1", - "udp://127.0.0.1:2375": "Invalid proto, expected tcp: udp://127.0.0.1:2375", - } - valids := map[string]string{ - "": defaultHTTPHost, - "tcp://": defaultHTTPHost, - "0.0.0.1:": "tcp://0.0.0.1:2376", - "0.0.0.1:5555": "tcp://0.0.0.1:5555", - "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", - ":6666": "tcp://127.0.0.1:6666", - ":6666/path": "tcp://127.0.0.1:6666/path", - "tcp://:7777": "tcp://127.0.0.1:7777", - "tcp://:7777/path": "tcp://127.0.0.1:7777/path", - "[::1]:": "tcp://[::1]:2376", - "[::1]:5555": "tcp://[::1]:5555", - "[::1]:5555/path": "tcp://[::1]:5555/path", - "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2376", - "[0:0:0:0:0:0:0:1]:5555": "tcp://[0:0:0:0:0:0:0:1]:5555", - "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", - "localhost:": "tcp://localhost:2376", - "localhost:5555": "tcp://localhost:5555", - "localhost:5555/path": "tcp://localhost:5555/path", - } - for invalidAddr, expectedError := range invalids { - if addr, err := ParseTCPAddr(invalidAddr, defaultHTTPHost); err == nil || err.Error() != expectedError { - t.Errorf("tcp %v address expected error %v 
return, got %s and addr %v", invalidAddr, expectedError, err, addr) - } - } - for validAddr, expectedAddr := range valids { - if addr, err := ParseTCPAddr(validAddr, defaultHTTPHost); err != nil || addr != expectedAddr { - t.Errorf("%v -> expected %v, got %v and addr %v", validAddr, expectedAddr, err, addr) - } - } -} - -func TestParseInvalidUnixAddrInvalid(t *testing.T) { - if _, err := parseSimpleProtoAddr("unix", "tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { - t.Fatalf("Expected an error, got %v", err) - } - if _, err := parseSimpleProtoAddr("unix", "unix://tcp://127.0.0.1", "/var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { - t.Fatalf("Expected an error, got %v", err) - } - if v, err := parseSimpleProtoAddr("unix", "", "/var/run/docker.sock"); err != nil || v != "unix:///var/run/docker.sock" { - t.Fatalf("Expected an %v, got %v", v, "unix:///var/run/docker.sock") - } -} - -func TestValidateExtraHosts(t *testing.T) { - valid := []string{ - `myhost:192.168.0.1`, - `thathost:10.0.2.1`, - `anipv6host:2003:ab34:e::1`, - `ipv6local:::1`, - } - - invalid := map[string]string{ - `myhost:192.notanipaddress.1`: `invalid IP`, - `thathost-nosemicolon10.0.0.1`: `bad format`, - `anipv6host:::::1`: `invalid IP`, - `ipv6local:::0::`: `invalid IP`, - } - - for _, extrahost := range valid { - if _, err := ValidateExtraHost(extrahost); err != nil { - t.Fatalf("ValidateExtraHost(`"+extrahost+"`) should succeed: error %v", err) - } - } - - for extraHost, expectedError := range invalid { - if _, err := ValidateExtraHost(extraHost); err == nil { - t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost) - } else { - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError) - } - } - } -} diff --git a/vendor/github.com/docker/docker/opts/ip_test.go b/vendor/github.com/docker/docker/opts/ip_test.go deleted file mode 100644 index 966d7f21..00000000 --- a/vendor/github.com/docker/docker/opts/ip_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -import ( - "net" - "testing" -) - -func TestIpOptString(t *testing.T) { - addresses := []string{"", "0.0.0.0"} - var ip net.IP - - for _, address := range addresses { - stringAddress := NewIPOpt(&ip, address).String() - if stringAddress != address { - t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress) - } - } -} - -func TestNewIpOptInvalidDefaultVal(t *testing.T) { - ip := net.IPv4(127, 0, 0, 1) - defaultVal := "Not an ip" - - ipOpt := NewIPOpt(&ip, defaultVal) - - expected := "127.0.0.1" - if ipOpt.String() != expected { - t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) - } -} - -func TestNewIpOptValidDefaultVal(t *testing.T) { - ip := net.IPv4(127, 0, 0, 1) - defaultVal := "192.168.1.1" - - ipOpt := NewIPOpt(&ip, defaultVal) - - expected := "192.168.1.1" - if ipOpt.String() != expected { - t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) - } -} - -func TestIpOptSetInvalidVal(t *testing.T) { - ip := net.IPv4(127, 0, 0, 1) - ipOpt := &IPOpt{IP: &ip} - - invalidIP := "invalid ip" - expectedError := "invalid ip is not an ip address" - err := ipOpt.Set(invalidIP) - if err == nil || err.Error() != expectedError { - t.Fatalf("Expected an Error with [%v], got [%v]", expectedError, err.Error()) - } -} diff --git 
a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go index bfdcb996..de8aacb8 100644 --- a/vendor/github.com/docker/docker/opts/opts.go +++ b/vendor/github.com/docker/docker/opts/opts.go @@ -7,7 +7,7 @@ import ( "regexp" "strings" - units "github.com/docker/go-units" + "github.com/docker/go-units" ) var ( @@ -52,7 +52,7 @@ func (opts *ListOpts) Set(value string) error { } value = v } - (*opts.values) = append((*opts.values), value) + *opts.values = append(*opts.values, value) return nil } @@ -60,7 +60,7 @@ func (opts *ListOpts) Set(value string) error { func (opts *ListOpts) Delete(key string) { for i, k := range *opts.values { if k == key { - (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) + *opts.values = append((*opts.values)[:i], (*opts.values)[i+1:]...) return } } @@ -78,7 +78,7 @@ func (opts *ListOpts) GetMap() map[string]struct{} { // GetAll returns the values of slice. func (opts *ListOpts) GetAll() []string { - return (*opts.values) + return *opts.values } // GetAllOrEmpty returns the values of the slice @@ -103,7 +103,7 @@ func (opts *ListOpts) Get(key string) bool { // Len returns the amount of element in the slice. func (opts *ListOpts) Len() int { - return len((*opts.values)) + return len(*opts.values) } // Type returns a string name for this Option type diff --git a/vendor/github.com/docker/docker/opts/opts_test.go b/vendor/github.com/docker/docker/opts/opts_test.go deleted file mode 100644 index 577395ed..00000000 --- a/vendor/github.com/docker/docker/opts/opts_test.go +++ /dev/null @@ -1,264 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -import ( - "fmt" - "strings" - "testing" -) - -func TestValidateIPAddress(t *testing.T) { - if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" { - t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err) - } - - if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" { - t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err) - } - - if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" { - t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err) - } - - if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" { - t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err) - } - - if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" { - t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err) - } - -} - -func TestMapOpts(t *testing.T) { - tmpMap := make(map[string]string) - o := NewMapOpts(tmpMap, logOptsValidator) - o.Set("max-size=1") - if o.String() != "map[max-size:1]" { - t.Errorf("%s != [map[max-size:1]", o.String()) - } - - o.Set("max-file=2") - if len(tmpMap) != 2 { - t.Errorf("map length %d != 2", len(tmpMap)) - } - - if tmpMap["max-file"] != "2" { - t.Errorf("max-file = %s != 2", tmpMap["max-file"]) - } - - if tmpMap["max-size"] != "1" { - t.Errorf("max-size = %s != 1", tmpMap["max-size"]) - } - if o.Set("dummy-val=3") == nil { - t.Error("validator is not being called") - } -} - -func TestListOptsWithoutValidator(t *testing.T) { - o := NewListOpts(nil) - o.Set("foo") - if o.String() != "[foo]" { - t.Errorf("%s != [foo]", o.String()) - } - o.Set("bar") - if o.Len() != 2 { - t.Errorf("%d != 2", o.Len()) - } - o.Set("bar") - if o.Len() != 3 { - t.Errorf("%d != 3", o.Len()) - } - if !o.Get("bar") { - t.Error("o.Get(\"bar\") == false") - } - if o.Get("baz") { - t.Error("o.Get(\"baz\") == true") - } - o.Delete("foo") - if o.String() != "[bar bar]" { 
- t.Errorf("%s != [bar bar]", o.String()) - } - listOpts := o.GetAll() - if len(listOpts) != 2 || listOpts[0] != "bar" || listOpts[1] != "bar" { - t.Errorf("Expected [[bar bar]], got [%v]", listOpts) - } - mapListOpts := o.GetMap() - if len(mapListOpts) != 1 { - t.Errorf("Expected [map[bar:{}]], got [%v]", mapListOpts) - } - -} - -func TestListOptsWithValidator(t *testing.T) { - // Re-using logOptsvalidator (used by MapOpts) - o := NewListOpts(logOptsValidator) - o.Set("foo") - if o.String() != "" { - t.Errorf(`%s != ""`, o.String()) - } - o.Set("foo=bar") - if o.String() != "" { - t.Errorf(`%s != ""`, o.String()) - } - o.Set("max-file=2") - if o.Len() != 1 { - t.Errorf("%d != 1", o.Len()) - } - if !o.Get("max-file=2") { - t.Error("o.Get(\"max-file=2\") == false") - } - if o.Get("baz") { - t.Error("o.Get(\"baz\") == true") - } - o.Delete("max-file=2") - if o.String() != "" { - t.Errorf(`%s != ""`, o.String()) - } -} - -func TestValidateDNSSearch(t *testing.T) { - valid := []string{ - `.`, - `a`, - `a.`, - `1.foo`, - `17.foo`, - `foo.bar`, - `foo.bar.baz`, - `foo.bar.`, - `foo.bar.baz`, - `foo1.bar2`, - `foo1.bar2.baz`, - `1foo.2bar.`, - `1foo.2bar.baz`, - `foo-1.bar-2`, - `foo-1.bar-2.baz`, - `foo-1.bar-2.`, - `foo-1.bar-2.baz`, - `1-foo.2-bar`, - `1-foo.2-bar.baz`, - `1-foo.2-bar.`, - `1-foo.2-bar.baz`, - } - - invalid := []string{ - ``, - ` `, - ` `, - `17`, - `17.`, - `.17`, - `17-.`, - `17-.foo`, - `.foo`, - `foo-.bar`, - `-foo.bar`, - `foo.bar-`, - `foo.bar-.baz`, - `foo.-bar`, - `foo.-bar.baz`, - `foo.bar.baz.this.should.fail.on.long.name.because.it.is.longer.thanitshouldbethis.should.fail.on.long.name.because.it.is.longer.thanitshouldbethis.should.fail.on.long.name.because.it.is.longer.thanitshouldbethis.should.fail.on.long.name.because.it.is.longer.thanitshouldbe`, - } - - for _, domain := range valid { - if ret, err := ValidateDNSSearch(domain); err != nil || ret == "" { - t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) - } - } - - for _, domain := range invalid { - if ret, err := ValidateDNSSearch(domain); err == nil || ret != "" { - t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) - } - } -} - -func TestValidateLabel(t *testing.T) { - if _, err := ValidateLabel("label"); err == nil || err.Error() != "bad attribute format: label" { - t.Fatalf("Expected an error [bad attribute format: label], go %v", err) - } - if actual, err := ValidateLabel("key1=value1"); err != nil || actual != "key1=value1" { - t.Fatalf("Expected [key1=value1], got [%v,%v]", actual, err) - } - // Validate it's working with more than one = - if actual, err := ValidateLabel("key1=value1=value2"); err != nil { - t.Fatalf("Expected [key1=value1=value2], got [%v,%v]", actual, err) - } - // Validate it's working with one more - if actual, err := ValidateLabel("key1=value1=value2=value3"); err != nil { - t.Fatalf("Expected [key1=value1=value2=value2], got [%v,%v]", actual, err) - } -} - -func logOptsValidator(val string) (string, error) { - allowedKeys := map[string]string{"max-size": "1", "max-file": "2"} - vals := strings.Split(val, "=") - if allowedKeys[vals[0]] != "" { - return val, nil - } - return "", fmt.Errorf("invalid key %s", vals[0]) -} - -func TestNamedListOpts(t *testing.T) { - var v []string - o := NewNamedListOptsRef("foo-name", &v, nil) - - o.Set("foo") - if o.String() != "[foo]" { - t.Errorf("%s != [foo]", o.String()) - } - if o.Name() != "foo-name" { - t.Errorf("%s != foo-name", o.Name()) - } - if len(v) != 1 { - t.Errorf("expected foo to be in the values, got %v", v) 
- } -} - -func TestNamedMapOpts(t *testing.T) { - tmpMap := make(map[string]string) - o := NewNamedMapOpts("max-name", tmpMap, nil) - - o.Set("max-size=1") - if o.String() != "map[max-size:1]" { - t.Errorf("%s != [map[max-size:1]", o.String()) - } - if o.Name() != "max-name" { - t.Errorf("%s != max-name", o.Name()) - } - if _, exist := tmpMap["max-size"]; !exist { - t.Errorf("expected map-size to be in the values, got %v", tmpMap) - } -} - -func TestParseLink(t *testing.T) { - name, alias, err := ParseLink("name:alias") - if err != nil { - t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err) - } - if name != "name" { - t.Fatalf("Link name should have been name, got %s instead", name) - } - if alias != "alias" { - t.Fatalf("Link alias should have been alias, got %s instead", alias) - } - // short format definition - name, alias, err = ParseLink("name") - if err != nil { - t.Fatalf("Expected not to error out on a valid name only format but got: %v", err) - } - if name != "name" { - t.Fatalf("Link name should have been name, got %s instead", name) - } - if alias != "name" { - t.Fatalf("Link alias should have been name, got %s instead", alias) - } - // empty string link definition is not allowed - if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") { - t.Fatalf("Expected error 'empty string specified for links' but got: %v", err) - } - // more than two colons are not allowed - if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") { - t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err) - } -} diff --git a/vendor/github.com/docker/docker/opts/quotedstring_test.go b/vendor/github.com/docker/docker/opts/quotedstring_test.go deleted file mode 100644 index 21e6e4c8..00000000 --- a/vendor/github.com/docker/docker/opts/quotedstring_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -import ( - "testing" - - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" -) - -func TestQuotedStringSetWithQuotes(t *testing.T) { - value := "" - qs := NewQuotedString(&value) - assert.Check(t, qs.Set(`"something"`)) - assert.Check(t, is.Equal("something", qs.String())) - assert.Check(t, is.Equal("something", value)) -} - -func TestQuotedStringSetWithMismatchedQuotes(t *testing.T) { - value := "" - qs := NewQuotedString(&value) - assert.Check(t, qs.Set(`"something'`)) - assert.Check(t, is.Equal(`"something'`, qs.String())) -} - -func TestQuotedStringSetWithNoQuotes(t *testing.T) { - value := "" - qs := NewQuotedString(&value) - assert.Check(t, qs.Set("something")) - assert.Check(t, is.Equal("something", qs.String())) -} diff --git a/vendor/github.com/docker/docker/opts/ulimit_test.go b/vendor/github.com/docker/docker/opts/ulimit_test.go deleted file mode 100644 index 41e12627..00000000 --- a/vendor/github.com/docker/docker/opts/ulimit_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -import ( - "testing" - - "github.com/docker/go-units" -) - -func TestUlimitOpt(t *testing.T) { - ulimitMap := map[string]*units.Ulimit{ - "nofile": {"nofile", 1024, 512}, - } - - ulimitOpt := NewUlimitOpt(&ulimitMap) - - expected := "[nofile=512:1024]" - if ulimitOpt.String() != expected { - t.Fatalf("Expected %v, got %v", expected, ulimitOpt) - } - - // Valid ulimit append to opts - if err 
:= ulimitOpt.Set("core=1024:1024"); err != nil { - t.Fatal(err) - } - - // Invalid ulimit type returns an error and do not append to opts - if err := ulimitOpt.Set("notavalidtype=1024:1024"); err == nil { - t.Fatalf("Expected error on invalid ulimit type") - } - expected = "[nofile=512:1024 core=1024:1024]" - expected2 := "[core=1024:1024 nofile=512:1024]" - result := ulimitOpt.String() - if result != expected && result != expected2 { - t.Fatalf("Expected %v or %v, got %v", expected, expected2, ulimitOpt) - } - - // And test GetList - ulimits := ulimitOpt.GetList() - if len(ulimits) != 2 { - t.Fatalf("Expected a ulimit list of 2, got %v", ulimits) - } -} diff --git a/vendor/github.com/docker/docker/pkg/README.md b/vendor/github.com/docker/docker/pkg/README.md deleted file mode 100644 index 755cd968..00000000 --- a/vendor/github.com/docker/docker/pkg/README.md +++ /dev/null @@ -1,11 +0,0 @@ -pkg/ is a collection of utility packages used by the Moby project without being specific to its internals. - -Utility packages are kept separate from the moby core codebase to keep it as small and concise as possible. -If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the -Moby organization, to facilitate re-use by other projects. However that is not the priority. - -The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core -Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad! - -Because utility packages are small and neatly separated from the rest of the codebase, they are a good -place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them! diff --git a/vendor/github.com/docker/docker/pkg/archive/BUILD.bazel b/vendor/github.com/docker/docker/pkg/archive/BUILD.bazel deleted file mode 100644 index 352bc58d..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/BUILD.bazel +++ /dev/null @@ -1,257 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "archive.go", - "changes.go", - "copy.go", - "diff.go", - "whiteouts.go", - "wrap.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "archive_other.go", - "archive_unix.go", - "changes_other.go", - "changes_unix.go", - "copy_unix.go", - "time_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "archive_other.go", - "archive_unix.go", - "changes_other.go", - "changes_unix.go", - "copy_unix.go", - "time_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "archive_other.go", - "archive_unix.go", - "changes_other.go", - "changes_unix.go", - "copy_unix.go", - "time_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "archive_other.go", - "archive_unix.go", - "changes_other.go", - "changes_unix.go", - "copy_unix.go", - "time_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "archive_linux.go", - "archive_unix.go", - "changes_linux.go", - "changes_unix.go", - "copy_unix.go", - "time_linux.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "archive_other.go", - "archive_unix.go", - "changes_other.go", - "changes_unix.go", - "copy_unix.go", - "time_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "archive_other.go", - "archive_unix.go", - "changes_other.go", - "changes_unix.go", - "copy_unix.go", - "time_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - 
"archive_other.go", - "archive_unix.go", - "changes_other.go", - "changes_unix.go", - "copy_unix.go", - "time_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "archive_other.go", - "archive_unix.go", - "changes_other.go", - "changes_unix.go", - "copy_unix.go", - "time_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "archive_other.go", - "archive_unix.go", - "changes_other.go", - "changes_unix.go", - "copy_unix.go", - "time_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "archive_other.go", - "archive_windows.go", - "changes_other.go", - "changes_windows.go", - "copy_windows.go", - "time_unsupported.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/docker/docker/pkg/archive", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/docker/docker/pkg/fileutils:go_default_library", - "//vendor/github.com/docker/docker/pkg/idtools:go_default_library", - "//vendor/github.com/docker/docker/pkg/ioutils:go_default_library", - "//vendor/github.com/docker/docker/pkg/pools:go_default_library", - "//vendor/github.com/docker/docker/pkg/system:go_default_library", - "//vendor/github.com/sirupsen/logrus:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "//vendor/github.com/opencontainers/runc/libcontainer/system:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "//vendor/github.com/opencontainers/runc/libcontainer/system:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//vendor/github.com/opencontainers/runc/libcontainer/system:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/github.com/opencontainers/runc/libcontainer/system:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/opencontainers/runc/libcontainer/system:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "//vendor/github.com/opencontainers/runc/libcontainer/system:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//vendor/github.com/opencontainers/runc/libcontainer/system:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//vendor/github.com/opencontainers/runc/libcontainer/system:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "//vendor/github.com/opencontainers/runc/libcontainer/system:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "//vendor/github.com/opencontainers/runc/libcontainer/system:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/docker/docker/pkg/longpath:go_default_library", - ], - "//conditions:default": [], - }), -) - -go_test( - name = "go_default_test", - srcs = [ - "archive_test.go", - "changes_posix_test.go", - "changes_test.go", - "diff_test.go", - "utils_test.go", - "wrap_test.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - 
"archive_unix_test.go", - "copy_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "archive_unix_test.go", - "copy_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "archive_unix_test.go", - "copy_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "archive_unix_test.go", - "copy_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "archive_linux_test.go", - "archive_unix_test.go", - "copy_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "archive_unix_test.go", - "copy_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "archive_unix_test.go", - "copy_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "archive_unix_test.go", - "copy_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "archive_unix_test.go", - "copy_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "archive_unix_test.go", - "copy_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "archive_windows_test.go", - ], - "//conditions:default": [], - }), - embed = [":go_default_library"], - importpath = "github.com/docker/docker/pkg/archive", - deps = [ - "//vendor/github.com/docker/docker/pkg/idtools:go_default_library", - "//vendor/github.com/docker/docker/pkg/ioutils:go_default_library", - "//vendor/github.com/docker/docker/pkg/system:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert/cmp:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/vendor/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md deleted file mode 100644 index 7307d969..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/README.md +++ /dev/null @@ -1 +0,0 @@ -This code provides helper functions for dealing with archive files. 
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go deleted file mode 100644 index e8f50869..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ /dev/null @@ -1,1281 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "bufio" - "bytes" - "compress/bzip2" - "compress/gzip" - "context" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strconv" - "strings" - "syscall" - - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" -) - -var unpigzPath string - -func init() { - if path, err := exec.LookPath("unpigz"); err != nil { - logrus.Debug("unpigz binary not found in PATH, falling back to go gzip library") - } else { - logrus.Debugf("Using unpigz binary found at path %s", path) - unpigzPath = path - } -} - -type ( - // Compression is the state represents if compressed or not. - Compression int - // WhiteoutFormat is the format of whiteouts unpacked - WhiteoutFormat int - - // TarOptions wraps the tar options. - TarOptions struct { - IncludeFiles []string - ExcludePatterns []string - Compression Compression - NoLchown bool - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - ChownOpts *idtools.IDPair - IncludeSourceDir bool - // WhiteoutFormat is the expected on disk format for whiteout files. - // This format will be converted to the standard format on pack - // and from the standard format on unpack. - WhiteoutFormat WhiteoutFormat - // When unpacking, specifies whether overwriting a directory with a - // non-directory is allowed and vice versa. - NoOverwriteDirNonDir bool - // For each include when creating an archive, the included name will be - // replaced with the matching name from this map. - RebaseNames map[string]string - InUserNS bool - } -) - -// Archiver implements the Archiver interface and allows the reuse of most utility functions of -// this package with a pluggable Untar function. Also, to facilitate the passing of specific id -// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. -type Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - IDMappingsVar *idtools.IDMappings -} - -// NewDefaultArchiver returns a new Archiver without any IDMappings -func NewDefaultArchiver() *Archiver { - return &Archiver{Untar: Untar, IDMappingsVar: &idtools.IDMappings{}} -} - -// breakoutError is used to differentiate errors related to breaking out -// When testing archive breakout in the unit tests, this error is expected -// in order for the test to pass. -type breakoutError error - -const ( - // Uncompressed represents the uncompressed. - Uncompressed Compression = iota - // Bzip2 is bzip2 compression algorithm. - Bzip2 - // Gzip is gzip compression algorithm. - Gzip - // Xz is xz compression algorithm. - Xz -) - -const ( - // AUFSWhiteoutFormat is the default format for whiteouts - AUFSWhiteoutFormat WhiteoutFormat = iota - // OverlayWhiteoutFormat formats whiteout according to the overlay - // standard. 
- OverlayWhiteoutFormat -) - -const ( - modeISDIR = 040000 // Directory - modeISFIFO = 010000 // FIFO - modeISREG = 0100000 // Regular file - modeISLNK = 0120000 // Symbolic link - modeISBLK = 060000 // Block special file - modeISCHR = 020000 // Character special file - modeISSOCK = 0140000 // Socket -) - -// IsArchivePath checks if the (possibly compressed) file at the given path -// starts with a tar file header. -func IsArchivePath(path string) bool { - file, err := os.Open(path) - if err != nil { - return false - } - defer file.Close() - rdr, err := DecompressStream(file) - if err != nil { - return false - } - r := tar.NewReader(rdr) - _, err = r.Next() - return err == nil -} - -// DetectCompression detects the compression algorithm of the source. -func DetectCompression(source []byte) Compression { - for compression, m := range map[Compression][]byte{ - Bzip2: {0x42, 0x5A, 0x68}, - Gzip: {0x1F, 0x8B, 0x08}, - Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, - } { - if len(source) < len(m) { - logrus.Debug("Len too short") - continue - } - if bytes.Equal(m, source[:len(m)]) { - return compression - } - } - return Uncompressed -} - -func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { - args := []string{"xz", "-d", "-c", "-q"} - - return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) -} - -func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { - if unpigzPath == "" { - return gzip.NewReader(buf) - } - - disablePigzEnv := os.Getenv("MOBY_DISABLE_PIGZ") - if disablePigzEnv != "" { - if disablePigz, err := strconv.ParseBool(disablePigzEnv); err != nil { - return nil, err - } else if disablePigz { - return gzip.NewReader(buf) - } - } - - return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) -} - -func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { - return ioutils.NewReadCloserWrapper(readBuf, func() error { - cancel() - return readBuf.Close() - }) -} - -// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. -func DecompressStream(archive io.Reader) (io.ReadCloser, error) { - p := pools.BufioReader32KPool - buf := p.Get(archive) - bs, err := buf.Peek(10) - if err != nil && err != io.EOF { - // Note: we'll ignore any io.EOF error because there are some odd - // cases where the layer.tar file will be empty (zero bytes) and - // that results in an io.EOF from the Peek() call. So, in those - // cases we'll just treat it as a non-compressed stream and - // that means just create an empty layer. 
- // See Issue 18170 - return nil, err - } - - compression := DetectCompression(bs) - switch compression { - case Uncompressed: - readBufWrapper := p.NewReadCloserWrapper(buf, buf) - return readBufWrapper, nil - case Gzip: - ctx, cancel := context.WithCancel(context.Background()) - - gzReader, err := gzDecompress(ctx, buf) - if err != nil { - cancel() - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) - return wrapReadCloser(readBufWrapper, cancel), nil - case Bzip2: - bz2Reader := bzip2.NewReader(buf) - readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) - return readBufWrapper, nil - case Xz: - ctx, cancel := context.WithCancel(context.Background()) - - xzReader, err := xzDecompress(ctx, buf) - if err != nil { - cancel() - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) - return wrapReadCloser(readBufWrapper, cancel), nil - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// CompressStream compresses the dest with specified compression algorithm. -func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { - p := pools.BufioWriter32KPool - buf := p.Get(dest) - switch compression { - case Uncompressed: - writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) - return writeBufWrapper, nil - case Gzip: - gzWriter := gzip.NewWriter(dest) - writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) - return writeBufWrapper, nil - case Bzip2, Xz: - // archive/bzip2 does not support writing, and there is no xz support at all - // However, this is not a problem as docker only currently generates gzipped tars - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to -// modify the contents or header of an entry in the archive. If the file already -// exists in the archive the TarModifierFunc will be called with the Header and -// a reader which will return the files content. If the file does not exist both -// header and content will be nil. -type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) - -// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the -// tar stream are modified if they match any of the keys in mods. 
-func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { - pipeReader, pipeWriter := io.Pipe() - - go func() { - tarReader := tar.NewReader(inputTarStream) - tarWriter := tar.NewWriter(pipeWriter) - defer inputTarStream.Close() - defer tarWriter.Close() - - modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { - header, data, err := modifier(name, original, tarReader) - switch { - case err != nil: - return err - case header == nil: - return nil - } - - header.Name = name - header.Size = int64(len(data)) - if err := tarWriter.WriteHeader(header); err != nil { - return err - } - if len(data) != 0 { - if _, err := tarWriter.Write(data); err != nil { - return err - } - } - return nil - } - - var err error - var originalHeader *tar.Header - for { - originalHeader, err = tarReader.Next() - if err == io.EOF { - break - } - if err != nil { - pipeWriter.CloseWithError(err) - return - } - - modifier, ok := mods[originalHeader.Name] - if !ok { - // No modifiers for this file, copy the header and data - if err := tarWriter.WriteHeader(originalHeader); err != nil { - pipeWriter.CloseWithError(err) - return - } - if _, err := pools.Copy(tarWriter, tarReader); err != nil { - pipeWriter.CloseWithError(err) - return - } - continue - } - delete(mods, originalHeader.Name) - - if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - - // Apply the modifiers that haven't matched any files in the archive - for name, modifier := range mods { - if err := modify(name, nil, modifier, nil); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - - pipeWriter.Close() - - }() - return pipeReader -} - -// Extension returns the extension of a file that uses the specified compression algorithm. -func (compression *Compression) Extension() string { - switch *compression { - case Uncompressed: - return "tar" - case Bzip2: - return "tar.bz2" - case Gzip: - return "tar.gz" - case Xz: - return "tar.xz" - } - return "" -} - -// FileInfoHeader creates a populated Header from fi. -// Compared to archive pkg this function fills in more information. -// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), -// which have been deleted since Go 1.9 archive/tar. 
-func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { - hdr, err := tar.FileInfoHeader(fi, link) - if err != nil { - return nil, err - } - hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) - name, err = canonicalTarName(name, fi.IsDir()) - if err != nil { - return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) - } - hdr.Name = name - if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { - return nil, err - } - return hdr, nil -} - -// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar -// https://github.com/golang/go/commit/66b5a2f -func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { - fm := fi.Mode() - switch { - case fm.IsRegular(): - mode |= modeISREG - case fi.IsDir(): - mode |= modeISDIR - case fm&os.ModeSymlink != 0: - mode |= modeISLNK - case fm&os.ModeDevice != 0: - if fm&os.ModeCharDevice != 0 { - mode |= modeISCHR - } else { - mode |= modeISBLK - } - case fm&os.ModeNamedPipe != 0: - mode |= modeISFIFO - case fm&os.ModeSocket != 0: - mode |= modeISSOCK - } - return mode -} - -// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem -// to a tar header -func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { - capability, _ := system.Lgetxattr(path, "security.capability") - if capability != nil { - hdr.Xattrs = make(map[string]string) - hdr.Xattrs["security.capability"] = string(capability) - } - return nil -} - -type tarWhiteoutConverter interface { - ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) - ConvertRead(*tar.Header, string) (bool, error) -} - -type tarAppender struct { - TarWriter *tar.Writer - Buffer *bufio.Writer - - // for hardlink mapping - SeenFiles map[uint64]string - IDMappings *idtools.IDMappings - ChownOpts *idtools.IDPair - - // For packing and unpacking whiteout files in the - // non standard format. The whiteout files defined - // by the AUFS standard are used as the tar whiteout - // standard. - WhiteoutConverter tarWhiteoutConverter -} - -func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender { - return &tarAppender{ - SeenFiles: make(map[uint64]string), - TarWriter: tar.NewWriter(writer), - Buffer: pools.BufioWriter32KPool.Get(nil), - IDMappings: idMapping, - ChownOpts: chownOpts, - } -} - -// canonicalTarName provides a platform-independent and consistent posix-style -//path for files and directories to be archived regardless of the platform. 
-func canonicalTarName(name string, isDir bool) (string, error) { - name, err := CanonicalTarNameForPath(name) - if err != nil { - return "", err - } - - // suffix with '/' for directories - if isDir && !strings.HasSuffix(name, "/") { - name += "/" - } - return name, nil -} - -// addTarFile adds to the tar archive a file from `path` as `name` -func (ta *tarAppender) addTarFile(path, name string) error { - fi, err := os.Lstat(path) - if err != nil { - return err - } - - var link string - if fi.Mode()&os.ModeSymlink != 0 { - var err error - link, err = os.Readlink(path) - if err != nil { - return err - } - } - - hdr, err := FileInfoHeader(name, fi, link) - if err != nil { - return err - } - if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { - return err - } - - // if it's not a directory and has more than 1 link, - // it's hard linked, so set the type flag accordingly - if !fi.IsDir() && hasHardlinks(fi) { - inode, err := getInodeFromStat(fi.Sys()) - if err != nil { - return err - } - // a link should have a name that it links too - // and that linked name should be first in the tar archive - if oldpath, ok := ta.SeenFiles[inode]; ok { - hdr.Typeflag = tar.TypeLink - hdr.Linkname = oldpath - hdr.Size = 0 // This Must be here for the writer math to add up! - } else { - ta.SeenFiles[inode] = name - } - } - - //check whether the file is overlayfs whiteout - //if yes, skip re-mapping container ID mappings. - isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 - - //handle re-mapping container ID mappings back to host ID mappings before - //writing tar headers/files. We skip whiteout files because they were written - //by the kernel and already have proper ownership relative to the host - if !isOverlayWhiteout && - !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && - !ta.IDMappings.Empty() { - fileIDPair, err := getFileUIDGID(fi.Sys()) - if err != nil { - return err - } - hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair) - if err != nil { - return err - } - } - - // explicitly override with ChownOpts - if ta.ChownOpts != nil { - hdr.Uid = ta.ChownOpts.UID - hdr.Gid = ta.ChownOpts.GID - } - - if ta.WhiteoutConverter != nil { - wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) - if err != nil { - return err - } - - // If a new whiteout file exists, write original hdr, then - // replace hdr with wo to be written after. Whiteouts should - // always be written after the original. Note the original - // hdr may have been updated to be a whiteout with returning - // a whiteout header - if wo != nil { - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - return fmt.Errorf("tar: cannot use whiteout for non-empty file") - } - hdr = wo - } - } - - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - // We use system.OpenSequential to ensure we use sequential file - // access on Windows to avoid depleting the standby list. - // On Linux, this equates to a regular os.Open. 
- file, err := system.OpenSequential(path) - if err != nil { - return err - } - - ta.Buffer.Reset(ta.TarWriter) - defer ta.Buffer.Reset(nil) - _, err = io.Copy(ta.Buffer, file) - file.Close() - if err != nil { - return err - } - err = ta.Buffer.Flush() - if err != nil { - return err - } - } - - return nil -} - -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error { - // hdr.Mode is in linux format, which we can use for sycalls, - // but for os.Foo() calls we need the mode converted to os.FileMode, - // so use hdrInfo.Mode() (they differ for e.g. setuid bits) - hdrInfo := hdr.FileInfo() - - switch hdr.Typeflag { - case tar.TypeDir: - // Create directory unless it exists as a directory already. - // In that case we just want to merge the two - if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { - if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { - return err - } - } - - case tar.TypeReg, tar.TypeRegA: - // Source is regular file. We use system.OpenFileSequential to use sequential - // file access to avoid depleting the standby list on Windows. - // On Linux, this equates to a regular os.OpenFile - file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) - if err != nil { - return err - } - if _, err := io.Copy(file, reader); err != nil { - file.Close() - return err - } - file.Close() - - case tar.TypeBlock, tar.TypeChar: - if inUserns { // cannot create devices in a userns - return nil - } - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeFifo: - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeLink: - targetPath := filepath.Join(extractDir, hdr.Linkname) - // check for hardlink breakout - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) - } - if err := os.Link(targetPath, path); err != nil { - return err - } - - case tar.TypeSymlink: - // path -> hdr.Linkname = targetPath - // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file - targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) - - // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because - // that symlink would first have to be created, which would be caught earlier, at this very check: - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) - } - if err := os.Symlink(hdr.Linkname, path); err != nil { - return err - } - - case tar.TypeXGlobalHeader: - logrus.Debug("PAX Global Extended Headers found and ignored") - return nil - - default: - return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) - } - - // Lchown is not supported on Windows. - if Lchown && runtime.GOOS != "windows" { - if chownOpts == nil { - chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} - } - if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { - return err - } - } - - var errors []string - for key, value := range hdr.Xattrs { - if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { - if err == syscall.ENOTSUP { - // We ignore errors here because not all graphdrivers support - // xattrs *cough* old versions of AUFS *cough*. 
However only - // ENOTSUP should be emitted in that case, otherwise we still - // bail. - errors = append(errors, err.Error()) - continue - } - return err - } - - } - - if len(errors) > 0 { - logrus.WithFields(logrus.Fields{ - "errors": errors, - }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") - } - - // There is no LChmod, so ignore mode for symlink. Also, this - // must happen after chown, as that can modify the file mode - if err := handleLChmod(hdr, path, hdrInfo); err != nil { - return err - } - - aTime := hdr.AccessTime - if aTime.Before(hdr.ModTime) { - // Last access time should never be before last modified time. - aTime = hdr.ModTime - } - - // system.Chtimes doesn't support a NOFOLLOW flag atm - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { - return err - } - } else { - ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} - if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { - return err - } - } - return nil -} - -// Tar creates an archive from the directory at `path`, and returns it as a -// stream of bytes. -func Tar(path string, compression Compression) (io.ReadCloser, error) { - return TarWithOptions(path, &TarOptions{Compression: compression}) -} - -// TarWithOptions creates an archive from the directory at `path`, only including files whose relative -// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. -func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - - // Fix the source path to work with long path names. This is a no-op - // on platforms other than Windows. - srcPath = fixVolumePathPrefix(srcPath) - - pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) - if err != nil { - return nil, err - } - - pipeReader, pipeWriter := io.Pipe() - - compressWriter, err := CompressStream(pipeWriter, options.Compression) - if err != nil { - return nil, err - } - - go func() { - ta := newTarAppender( - idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), - compressWriter, - options.ChownOpts, - ) - ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat) - - defer func() { - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - logrus.Errorf("Can't close tar writer: %s", err) - } - if err := compressWriter.Close(); err != nil { - logrus.Errorf("Can't close compress writer: %s", err) - } - if err := pipeWriter.Close(); err != nil { - logrus.Errorf("Can't close pipe writer: %s", err) - } - }() - - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - - stat, err := os.Lstat(srcPath) - if err != nil { - return - } - - if !stat.IsDir() { - // We can't later join a non-dir with any includes because the - // 'walk' will error if "file/." is stat-ed and "file" is not a - // directory. So, we must split the source path and use the - // basename as the include. 
- if len(options.IncludeFiles) > 0 { - logrus.Warn("Tar: Can't archive a file with includes") - } - - dir, base := SplitPathDirEntry(srcPath) - srcPath = dir - options.IncludeFiles = []string{base} - } - - if len(options.IncludeFiles) == 0 { - options.IncludeFiles = []string{"."} - } - - seen := make(map[string]bool) - - for _, include := range options.IncludeFiles { - rebaseName := options.RebaseNames[include] - - walkRoot := getWalkRoot(srcPath, include) - filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { - if err != nil { - logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) - return nil - } - - relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { - // Error getting relative path OR we are looking - // at the source directory path. Skip in both situations. - return nil - } - - if options.IncludeSourceDir && include == "." && relFilePath != "." { - relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) - } - - skip := false - - // If "include" is an exact match for the current file - // then even if there's an "excludePatterns" pattern that - // matches it, don't skip it. IOW, assume an explicit 'include' - // is asking for that file no matter what - which is true - // for some files, like .dockerignore and Dockerfile (sometimes) - if include != relFilePath { - skip, err = pm.Matches(relFilePath) - if err != nil { - logrus.Errorf("Error matching %s: %v", relFilePath, err) - return err - } - } - - if skip { - // If we want to skip this file and its a directory - // then we should first check to see if there's an - // excludes pattern (e.g. !dir/file) that starts with this - // dir. If so then we can't skip this dir. - - // Its not a dir then so we can just return/skip. - if !f.IsDir() { - return nil - } - - // No exceptions (!...) in patterns so just skip dir - if !pm.Exclusions() { - return filepath.SkipDir - } - - dirSlash := relFilePath + string(filepath.Separator) - - for _, pat := range pm.Patterns() { - if !pat.Exclusion() { - continue - } - if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { - // found a match - so can't skip this dir - return nil - } - } - - // No matching exclusion dir so just skip dir - return filepath.SkipDir - } - - if seen[relFilePath] { - return nil - } - seen[relFilePath] = true - - // Rename the base resource. - if rebaseName != "" { - var replacement string - if rebaseName != string(filepath.Separator) { - // Special case the root directory to replace with an - // empty string instead so that we don't end up with - // double slashes in the paths. - replacement = rebaseName - } - - relFilePath = strings.Replace(relFilePath, include, replacement, 1) - } - - if err := ta.addTarFile(filePath, relFilePath); err != nil { - logrus.Errorf("Can't add file %s to tar: %s", filePath, err) - // if pipe is broken, stop writing tar stream to it - if err == io.ErrClosedPipe { - return err - } - } - return nil - }) - } - }() - - return pipeReader, nil -} - -// Unpack unpacks the decompressedArchive to dest with options. 
-func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { - tr := tar.NewReader(decompressedArchive) - trBuf := pools.BufioReader32KPool.Get(nil) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) - rootIDs := idMappings.RootPair() - whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) - - // Iterate through the files in the archive. -loop: - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return err - } - - // Normalize name, for safety and for a simple is-root check - // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: - // This keeps "..\" as-is, but normalizes "\..\" to "\". - hdr.Name = filepath.Clean(hdr.Name) - - for _, exclude := range options.ExcludePatterns { - if strings.HasPrefix(hdr.Name, exclude) { - continue loop - } - } - - // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in - // the filepath format for the OS on which the daemon is running. Hence - // the check for a slash-suffix MUST be done in an OS-agnostic way. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs) - if err != nil { - return err - } - } - } - - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return err - } - if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - - // If path exits we almost always just want to remove and replace it - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing directory with a non-directory from the archive. - return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) - } - - if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing non-directory with a directory from the archive. - return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) - } - - if fi.IsDir() && hdr.Name == "." 
{ - continue - } - - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return err - } - } - } - trBuf.Reset(tr) - - if err := remapIDs(idMappings, hdr); err != nil { - return err - } - - if whiteoutConverter != nil { - writeFile, err := whiteoutConverter.ConvertRead(hdr, path) - if err != nil { - return err - } - if !writeFile { - continue - } - } - - if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { - return err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - } - - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return err - } - } - return nil -} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -// FIXME: specify behavior when target path exists vs. doesn't exist. -func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, true) -} - -// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive must be an uncompressed stream. -func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, false) -} - -// Handler for teasing out the automatic decompression -func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { - if tarArchive == nil { - return fmt.Errorf("Empty archive") - } - dest = filepath.Clean(dest) - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - r := tarArchive - if decompress { - decompressedArchive, err := DecompressStream(tarArchive) - if err != nil { - return err - } - defer decompressedArchive.Close() - r = decompressedArchive - } - - return Unpack(r, dest, options) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func (archiver *Archiver) TarUntar(src, dst string) error { - logrus.Debugf("TarUntar(%s %s)", src, dst) - archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) - if err != nil { - return err - } - defer archive.Close() - options := &TarOptions{ - UIDMaps: archiver.IDMappingsVar.UIDs(), - GIDMaps: archiver.IDMappingsVar.GIDs(), - } - return archiver.Untar(archive, dst, options) -} - -// UntarPath untar a file from path to a destination, src is the source tar file path. -func (archiver *Archiver) UntarPath(src, dst string) error { - archive, err := os.Open(src) - if err != nil { - return err - } - defer archive.Close() - options := &TarOptions{ - UIDMaps: archiver.IDMappingsVar.UIDs(), - GIDMaps: archiver.IDMappingsVar.GIDs(), - } - return archiver.Untar(archive, dst, options) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. 
-func (archiver *Archiver) CopyWithTar(src, dst string) error { - srcSt, err := os.Stat(src) - if err != nil { - return err - } - if !srcSt.IsDir() { - return archiver.CopyFileWithTar(src, dst) - } - - // if this Archiver is set up with ID mapping we need to create - // the new destination directory with the remapped root UID/GID pair - // as owner - rootIDs := archiver.IDMappingsVar.RootPair() - // Create dst, copy src's content into it - logrus.Debugf("Creating dest directory: %s", dst) - if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { - return err - } - logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) - return archiver.TarUntar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { - logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) - srcSt, err := os.Stat(src) - if err != nil { - return err - } - - if srcSt.IsDir() { - return fmt.Errorf("Can't copy a directory") - } - - // Clean up the trailing slash. This must be done in an operating - // system specific manner. - if dst[len(dst)-1] == os.PathSeparator { - dst = filepath.Join(dst, filepath.Base(src)) - } - // Create the holding directory if necessary - if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil { - return err - } - - r, w := io.Pipe() - errC := make(chan error, 1) - - go func() { - defer close(errC) - - errC <- func() error { - defer w.Close() - - srcF, err := os.Open(src) - if err != nil { - return err - } - defer srcF.Close() - - hdr, err := tar.FileInfoHeader(srcSt, "") - if err != nil { - return err - } - hdr.Name = filepath.Base(dst) - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil { - return err - } - - tw := tar.NewWriter(w) - defer tw.Close() - if err := tw.WriteHeader(hdr); err != nil { - return err - } - if _, err := io.Copy(tw, srcF); err != nil { - return err - } - return nil - }() - }() - defer func() { - if er := <-errC; err == nil && er != nil { - err = er - } - }() - - err = archiver.Untar(r, filepath.Dir(dst), nil) - if err != nil { - r.CloseWithError(err) - } - return err -} - -// IDMappings returns the IDMappings of the archiver. -func (archiver *Archiver) IDMappings() *idtools.IDMappings { - return archiver.IDMappingsVar -} - -func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error { - ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) - hdr.Uid, hdr.Gid = ids.UID, ids.GID - return err -} - -// cmdStream executes a command, and returns its stdout as a stream. -// If the command fails to run or doesn't complete successfully, an error -// will be returned, including anything written on stderr. -func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { - cmd.Stdin = input - pipeR, pipeW := io.Pipe() - cmd.Stdout = pipeW - var errBuf bytes.Buffer - cmd.Stderr = &errBuf - - // Run the command and return the pipe - if err := cmd.Start(); err != nil { - return nil, err - } - - // Copy stdout to the returned pipe - go func() { - if err := cmd.Wait(); err != nil { - pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) - } else { - pipeW.Close() - } - }() - - return pipeR, nil -} - -// NewTempArchive reads the content of src into a temporary file, and returns the contents -// of that file as an archive. 
The archive can only be read once - as soon as reading completes, -// the file will be deleted. -func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { - f, err := ioutil.TempFile(dir, "") - if err != nil { - return nil, err - } - if _, err := io.Copy(f, src); err != nil { - return nil, err - } - if _, err := f.Seek(0, 0); err != nil { - return nil, err - } - st, err := f.Stat() - if err != nil { - return nil, err - } - size := st.Size() - return &TempArchive{File: f, Size: size}, nil -} - -// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, -// the file will be deleted. -type TempArchive struct { - *os.File - Size int64 // Pre-computed from Stat().Size() as a convenience - read int64 - closed bool -} - -// Close closes the underlying file if it's still open, or does a no-op -// to allow callers to try to close the TempArchive multiple times safely. -func (archive *TempArchive) Close() error { - if archive.closed { - return nil - } - - archive.closed = true - - return archive.File.Close() -} - -func (archive *TempArchive) Read(data []byte) (int, error) { - n, err := archive.File.Read(data) - archive.read += int64(n) - if err != nil || archive.read == archive.Size { - archive.Close() - os.Remove(archive.File.Name()) - } - return n, err -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux_test.go deleted file mode 100644 index b397c8ab..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux_test.go +++ /dev/null @@ -1,160 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "io/ioutil" - "os" - "path/filepath" - "syscall" - "testing" - - "github.com/docker/docker/pkg/system" - "github.com/gotestyourself/gotestyourself/assert" - "golang.org/x/sys/unix" -) - -// setupOverlayTestDir creates files in a directory with overlay whiteouts -// Tree layout -// . 
-// ├── d1 # opaque, 0700 -// │   └── f1 # empty file, 0600 -// ├── d2 # opaque, 0750 -// │   └── f1 # empty file, 0660 -// └── d3 # 0700 -// └── f1 # whiteout, 0644 -func setupOverlayTestDir(t *testing.T, src string) { - // Create opaque directory containing single file and permission 0700 - err := os.Mkdir(filepath.Join(src, "d1"), 0700) - assert.NilError(t, err) - - err = system.Lsetxattr(filepath.Join(src, "d1"), "trusted.overlay.opaque", []byte("y"), 0) - assert.NilError(t, err) - - err = ioutil.WriteFile(filepath.Join(src, "d1", "f1"), []byte{}, 0600) - assert.NilError(t, err) - - // Create another opaque directory containing single file but with permission 0750 - err = os.Mkdir(filepath.Join(src, "d2"), 0750) - assert.NilError(t, err) - - err = system.Lsetxattr(filepath.Join(src, "d2"), "trusted.overlay.opaque", []byte("y"), 0) - assert.NilError(t, err) - - err = ioutil.WriteFile(filepath.Join(src, "d2", "f1"), []byte{}, 0660) - assert.NilError(t, err) - - // Create regular directory with deleted file - err = os.Mkdir(filepath.Join(src, "d3"), 0700) - assert.NilError(t, err) - - err = system.Mknod(filepath.Join(src, "d3", "f1"), unix.S_IFCHR, 0) - assert.NilError(t, err) -} - -func checkOpaqueness(t *testing.T, path string, opaque string) { - xattrOpaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") - assert.NilError(t, err) - - if string(xattrOpaque) != opaque { - t.Fatalf("Unexpected opaque value: %q, expected %q", string(xattrOpaque), opaque) - } - -} - -func checkOverlayWhiteout(t *testing.T, path string) { - stat, err := os.Stat(path) - assert.NilError(t, err) - - statT, ok := stat.Sys().(*syscall.Stat_t) - if !ok { - t.Fatalf("Unexpected type: %t, expected *syscall.Stat_t", stat.Sys()) - } - if statT.Rdev != 0 { - t.Fatalf("Non-zero device number for whiteout") - } -} - -func checkFileMode(t *testing.T, path string, perm os.FileMode) { - stat, err := os.Stat(path) - assert.NilError(t, err) - - if stat.Mode() != perm { - t.Fatalf("Unexpected file mode for %s: %o, expected %o", path, stat.Mode(), perm) - } -} - -func TestOverlayTarUntar(t *testing.T) { - oldmask, err := system.Umask(0) - assert.NilError(t, err) - defer system.Umask(oldmask) - - src, err := ioutil.TempDir("", "docker-test-overlay-tar-src") - assert.NilError(t, err) - defer os.RemoveAll(src) - - setupOverlayTestDir(t, src) - - dst, err := ioutil.TempDir("", "docker-test-overlay-tar-dst") - assert.NilError(t, err) - defer os.RemoveAll(dst) - - options := &TarOptions{ - Compression: Uncompressed, - WhiteoutFormat: OverlayWhiteoutFormat, - } - archive, err := TarWithOptions(src, options) - assert.NilError(t, err) - defer archive.Close() - - err = Untar(archive, dst, options) - assert.NilError(t, err) - - checkFileMode(t, filepath.Join(dst, "d1"), 0700|os.ModeDir) - checkFileMode(t, filepath.Join(dst, "d2"), 0750|os.ModeDir) - checkFileMode(t, filepath.Join(dst, "d3"), 0700|os.ModeDir) - checkFileMode(t, filepath.Join(dst, "d1", "f1"), 0600) - checkFileMode(t, filepath.Join(dst, "d2", "f1"), 0660) - checkFileMode(t, filepath.Join(dst, "d3", "f1"), os.ModeCharDevice|os.ModeDevice) - - checkOpaqueness(t, filepath.Join(dst, "d1"), "y") - checkOpaqueness(t, filepath.Join(dst, "d2"), "y") - checkOpaqueness(t, filepath.Join(dst, "d3"), "") - checkOverlayWhiteout(t, filepath.Join(dst, "d3", "f1")) -} - -func TestOverlayTarAUFSUntar(t *testing.T) { - oldmask, err := system.Umask(0) - assert.NilError(t, err) - defer system.Umask(oldmask) - - src, err := ioutil.TempDir("", "docker-test-overlay-tar-src") - 
assert.NilError(t, err) - defer os.RemoveAll(src) - - setupOverlayTestDir(t, src) - - dst, err := ioutil.TempDir("", "docker-test-overlay-tar-dst") - assert.NilError(t, err) - defer os.RemoveAll(dst) - - archive, err := TarWithOptions(src, &TarOptions{ - Compression: Uncompressed, - WhiteoutFormat: OverlayWhiteoutFormat, - }) - assert.NilError(t, err) - defer archive.Close() - - err = Untar(archive, dst, &TarOptions{ - Compression: Uncompressed, - WhiteoutFormat: AUFSWhiteoutFormat, - }) - assert.NilError(t, err) - - checkFileMode(t, filepath.Join(dst, "d1"), 0700|os.ModeDir) - checkFileMode(t, filepath.Join(dst, "d1", WhiteoutOpaqueDir), 0700) - checkFileMode(t, filepath.Join(dst, "d2"), 0750|os.ModeDir) - checkFileMode(t, filepath.Join(dst, "d2", WhiteoutOpaqueDir), 0750) - checkFileMode(t, filepath.Join(dst, "d3"), 0700|os.ModeDir) - checkFileMode(t, filepath.Join(dst, "d1", "f1"), 0600) - checkFileMode(t, filepath.Join(dst, "d2", "f1"), 0660) - checkFileMode(t, filepath.Join(dst, "d3", WhiteoutPrefix+"f1"), 0600) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go deleted file mode 100644 index 462dfc63..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_other.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux - -package archive // import "github.com/docker/docker/pkg/archive" - -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_test.go deleted file mode 100644 index 958e4efc..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_test.go +++ /dev/null @@ -1,1360 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "reflect" - "runtime" - "strings" - "testing" - "time" - - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" -) - -var tmp string - -func init() { - tmp = "/tmp/" - if runtime.GOOS == "windows" { - tmp = os.Getenv("TEMP") + `\` - } -} - -var defaultArchiver = NewDefaultArchiver() - -func defaultTarUntar(src, dst string) error { - return defaultArchiver.TarUntar(src, dst) -} - -func defaultUntarPath(src, dst string) error { - return defaultArchiver.UntarPath(src, dst) -} - -func defaultCopyFileWithTar(src, dst string) (err error) { - return defaultArchiver.CopyFileWithTar(src, dst) -} - -func defaultCopyWithTar(src, dst string) error { - return defaultArchiver.CopyWithTar(src, dst) -} - -func TestIsArchivePathDir(t *testing.T) { - cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") - output, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("Fail to create an archive file for test : %s.", output) - } - if IsArchivePath(tmp + "archivedir") { - t.Fatalf("Incorrectly recognised directory as an archive") - } -} - -func TestIsArchivePathInvalidFile(t *testing.T) { - cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") - output, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("Fail to create an archive file for test : %s.", output) - } - if IsArchivePath(tmp + "archive") { - t.Fatalf("Incorrectly recognised invalid tar path 
as archive") - } - if IsArchivePath(tmp + "archive.gz") { - t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") - } -} - -func TestIsArchivePathTar(t *testing.T) { - whichTar := "tar" - cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) - cmd := exec.Command("sh", "-c", cmdStr) - output, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("Fail to create an archive file for test : %s.", output) - } - if !IsArchivePath(tmp + "/archive") { - t.Fatalf("Did not recognise valid tar path as archive") - } - if !IsArchivePath(tmp + "archive.gz") { - t.Fatalf("Did not recognise valid compressed tar path as archive") - } -} - -func testDecompressStream(t *testing.T, ext, compressCommand string) io.Reader { - cmd := exec.Command("sh", "-c", - fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) - output, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("Failed to create an archive file for test : %s.", output) - } - filename := "archive." + ext - archive, err := os.Open(tmp + filename) - if err != nil { - t.Fatalf("Failed to open file %s: %v", filename, err) - } - defer archive.Close() - - r, err := DecompressStream(archive) - if err != nil { - t.Fatalf("Failed to decompress %s: %v", filename, err) - } - if _, err = ioutil.ReadAll(r); err != nil { - t.Fatalf("Failed to read the decompressed stream: %v ", err) - } - if err = r.Close(); err != nil { - t.Fatalf("Failed to close the decompressed stream: %v ", err) - } - - return r -} - -func TestDecompressStreamGzip(t *testing.T) { - testDecompressStream(t, "gz", "gzip -f") -} - -func TestDecompressStreamBzip2(t *testing.T) { - testDecompressStream(t, "bz2", "bzip2 -f") -} - -func TestDecompressStreamXz(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("Xz not present in msys2") - } - testDecompressStream(t, "xz", "xz -f") -} - -func TestCompressStreamXzUnsupported(t *testing.T) { - dest, err := os.Create(tmp + "dest") - if err != nil { - t.Fatalf("Fail to create the destination file") - } - defer dest.Close() - - _, err = CompressStream(dest, Xz) - if err == nil { - t.Fatalf("Should fail as xz is unsupported for compression format.") - } -} - -func TestCompressStreamBzip2Unsupported(t *testing.T) { - dest, err := os.Create(tmp + "dest") - if err != nil { - t.Fatalf("Fail to create the destination file") - } - defer dest.Close() - - _, err = CompressStream(dest, Bzip2) - if err == nil { - t.Fatalf("Should fail as bzip2 is unsupported for compression format.") - } -} - -func TestCompressStreamInvalid(t *testing.T) { - dest, err := os.Create(tmp + "dest") - if err != nil { - t.Fatalf("Fail to create the destination file") - } - defer dest.Close() - - _, err = CompressStream(dest, -1) - if err == nil { - t.Fatalf("Should fail as xz is unsupported for compression format.") - } -} - -func TestExtensionInvalid(t *testing.T) { - compression := Compression(-1) - output := compression.Extension() - if output != "" { - t.Fatalf("The extension of an invalid compression should be an empty string.") - } -} - -func TestExtensionUncompressed(t *testing.T) { - compression := Uncompressed - output := compression.Extension() - if output != "tar" { - t.Fatalf("The extension of an uncompressed archive should be 'tar'.") - } -} -func TestExtensionBzip2(t *testing.T) { - compression := Bzip2 - output := compression.Extension() - if output != "tar.bz2" { - t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") - } 
-} -func TestExtensionGzip(t *testing.T) { - compression := Gzip - output := compression.Extension() - if output != "tar.gz" { - t.Fatalf("The extension of a gzip archive should be 'tar.gz'") - } -} -func TestExtensionXz(t *testing.T) { - compression := Xz - output := compression.Extension() - if output != "tar.xz" { - t.Fatalf("The extension of a xz archive should be 'tar.xz'") - } -} - -func TestCmdStreamLargeStderr(t *testing.T) { - cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") - out, err := cmdStream(cmd, nil) - if err != nil { - t.Fatalf("Failed to start command: %s", err) - } - errCh := make(chan error) - go func() { - _, err := io.Copy(ioutil.Discard, out) - errCh <- err - }() - select { - case err := <-errCh: - if err != nil { - t.Fatalf("Command should not have failed (err=%.100s...)", err) - } - case <-time.After(5 * time.Second): - t.Fatalf("Command did not complete in 5 seconds; probable deadlock") - } -} - -func TestCmdStreamBad(t *testing.T) { - // TODO Windows: Figure out why this is failing in CI but not locally - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows CI machines") - } - badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") - out, err := cmdStream(badCmd, nil) - if err != nil { - t.Fatalf("Failed to start command: %s", err) - } - if output, err := ioutil.ReadAll(out); err == nil { - t.Fatalf("Command should have failed") - } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { - t.Fatalf("Wrong error value (%s)", err) - } else if s := string(output); s != "hello\n" { - t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) - } -} - -func TestCmdStreamGood(t *testing.T) { - cmd := exec.Command("sh", "-c", "echo hello; exit 0") - out, err := cmdStream(cmd, nil) - if err != nil { - t.Fatal(err) - } - if output, err := ioutil.ReadAll(out); err != nil { - t.Fatalf("Command should not have failed (err=%s)", err) - } else if s := string(output); s != "hello\n" { - t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) - } -} - -func TestUntarPathWithInvalidDest(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-archive-test") - assert.NilError(t, err) - defer os.RemoveAll(tempFolder) - invalidDestFolder := filepath.Join(tempFolder, "invalidDest") - // Create a src file - srcFile := filepath.Join(tempFolder, "src") - tarFile := filepath.Join(tempFolder, "src.tar") - os.Create(srcFile) - os.Create(invalidDestFolder) // being a file (not dir) should cause an error - - // Translate back to Unix semantics as next exec.Command is run under sh - srcFileU := srcFile - tarFileU := tarFile - if runtime.GOOS == "windows" { - tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" - srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" - } - - cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) - _, err = cmd.CombinedOutput() - assert.NilError(t, err) - - err = defaultUntarPath(tarFile, invalidDestFolder) - if err == nil { - t.Fatalf("UntarPath with invalid destination path should throw an error.") - } -} - -func TestUntarPathWithInvalidSrc(t *testing.T) { - dest, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatalf("Fail to create the destination file") - } - defer os.RemoveAll(dest) - err = defaultUntarPath("/invalid/path", dest) - if err == nil { - t.Fatalf("UntarPath with invalid src path should throw an error.") - } -} - 
-func TestUntarPath(t *testing.T) { - tmpFolder, err := ioutil.TempDir("", "docker-archive-test") - assert.NilError(t, err) - defer os.RemoveAll(tmpFolder) - srcFile := filepath.Join(tmpFolder, "src") - tarFile := filepath.Join(tmpFolder, "src.tar") - os.Create(filepath.Join(tmpFolder, "src")) - - destFolder := filepath.Join(tmpFolder, "dest") - err = os.MkdirAll(destFolder, 0740) - if err != nil { - t.Fatalf("Fail to create the destination file") - } - - // Translate back to Unix semantics as next exec.Command is run under sh - srcFileU := srcFile - tarFileU := tarFile - if runtime.GOOS == "windows" { - tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" - srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" - } - cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) - _, err = cmd.CombinedOutput() - assert.NilError(t, err) - - err = defaultUntarPath(tarFile, destFolder) - if err != nil { - t.Fatalf("UntarPath shouldn't throw an error, %s.", err) - } - expectedFile := filepath.Join(destFolder, srcFileU) - _, err = os.Stat(expectedFile) - if err != nil { - t.Fatalf("Destination folder should contain the source file but did not.") - } -} - -// Do the same test as above but with the destination as file, it should fail -func TestUntarPathWithDestinationFile(t *testing.T) { - tmpFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpFolder) - srcFile := filepath.Join(tmpFolder, "src") - tarFile := filepath.Join(tmpFolder, "src.tar") - os.Create(filepath.Join(tmpFolder, "src")) - - // Translate back to Unix semantics as next exec.Command is run under sh - srcFileU := srcFile - tarFileU := tarFile - if runtime.GOOS == "windows" { - tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" - srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" - } - cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) - _, err = cmd.CombinedOutput() - if err != nil { - t.Fatal(err) - } - destFile := filepath.Join(tmpFolder, "dest") - _, err = os.Create(destFile) - if err != nil { - t.Fatalf("Fail to create the destination file") - } - err = defaultUntarPath(tarFile, destFile) - if err == nil { - t.Fatalf("UntarPath should throw an error if the destination if a file") - } -} - -// Do the same test as above but with the destination folder already exists -// and the destination file is a directory -// It's working, see https://github.com/docker/docker/issues/10040 -func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { - tmpFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpFolder) - srcFile := filepath.Join(tmpFolder, "src") - tarFile := filepath.Join(tmpFolder, "src.tar") - os.Create(srcFile) - - // Translate back to Unix semantics as next exec.Command is run under sh - srcFileU := srcFile - tarFileU := tarFile - if runtime.GOOS == "windows" { - tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" - srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" - } - - cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) - _, err = cmd.CombinedOutput() - if err != nil { - t.Fatal(err) - } - destFolder := filepath.Join(tmpFolder, "dest") - err = os.MkdirAll(destFolder, 0740) - if err != nil { - t.Fatalf("Fail to create the destination folder") - } - // Let's create a folder that will has the same path as the extracted file (from tar) - destSrcFileAsFolder := 
filepath.Join(destFolder, srcFileU) - err = os.MkdirAll(destSrcFileAsFolder, 0740) - if err != nil { - t.Fatal(err) - } - err = defaultUntarPath(tarFile, destFolder) - if err != nil { - t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") - } -} - -func TestCopyWithTarInvalidSrc(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(nil) - } - destFolder := filepath.Join(tempFolder, "dest") - invalidSrc := filepath.Join(tempFolder, "doesnotexists") - err = os.MkdirAll(destFolder, 0740) - if err != nil { - t.Fatal(err) - } - err = defaultCopyWithTar(invalidSrc, destFolder) - if err == nil { - t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") - } -} - -func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(nil) - } - srcFolder := filepath.Join(tempFolder, "src") - inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") - err = os.MkdirAll(srcFolder, 0740) - if err != nil { - t.Fatal(err) - } - err = defaultCopyWithTar(srcFolder, inexistentDestFolder) - if err != nil { - t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") - } - _, err = os.Stat(inexistentDestFolder) - if err != nil { - t.Fatalf("CopyWithTar with an inexistent folder should create it.") - } -} - -// Test CopyWithTar with a file as src -func TestCopyWithTarSrcFile(t *testing.T) { - folder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(folder) - dest := filepath.Join(folder, "dest") - srcFolder := filepath.Join(folder, "src") - src := filepath.Join(folder, filepath.Join("src", "src")) - err = os.MkdirAll(srcFolder, 0740) - if err != nil { - t.Fatal(err) - } - err = os.MkdirAll(dest, 0740) - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(src, []byte("content"), 0777) - err = defaultCopyWithTar(src, dest) - if err != nil { - t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) - } - _, err = os.Stat(dest) - // FIXME Check the content - if err != nil { - t.Fatalf("Destination file should be the same as the source.") - } -} - -// Test CopyWithTar with a folder as src -func TestCopyWithTarSrcFolder(t *testing.T) { - folder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(folder) - dest := filepath.Join(folder, "dest") - src := filepath.Join(folder, filepath.Join("src", "folder")) - err = os.MkdirAll(src, 0740) - if err != nil { - t.Fatal(err) - } - err = os.MkdirAll(dest, 0740) - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) - err = defaultCopyWithTar(src, dest) - if err != nil { - t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) - } - _, err = os.Stat(dest) - // FIXME Check the content (the file inside) - if err != nil { - t.Fatalf("Destination folder should contain the source file but did not.") - } -} - -func TestCopyFileWithTarInvalidSrc(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempFolder) - destFolder := filepath.Join(tempFolder, "dest") - err = os.MkdirAll(destFolder, 0740) - if err != nil { - t.Fatal(err) - } - invalidFile := filepath.Join(tempFolder, "doesnotexists") - err = defaultCopyFileWithTar(invalidFile, destFolder) - if err == nil { - 
t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") - } -} - -func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(nil) - } - defer os.RemoveAll(tempFolder) - srcFile := filepath.Join(tempFolder, "src") - inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") - _, err = os.Create(srcFile) - if err != nil { - t.Fatal(err) - } - err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) - if err != nil { - t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") - } - _, err = os.Stat(inexistentDestFolder) - if err != nil { - t.Fatalf("CopyWithTar with an inexistent folder should create it.") - } - // FIXME Test the src file and content -} - -func TestCopyFileWithTarSrcFolder(t *testing.T) { - folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(folder) - dest := filepath.Join(folder, "dest") - src := filepath.Join(folder, "srcfolder") - err = os.MkdirAll(src, 0740) - if err != nil { - t.Fatal(err) - } - err = os.MkdirAll(dest, 0740) - if err != nil { - t.Fatal(err) - } - err = defaultCopyFileWithTar(src, dest) - if err == nil { - t.Fatalf("CopyFileWithTar should throw an error with a folder.") - } -} - -func TestCopyFileWithTarSrcFile(t *testing.T) { - folder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(folder) - dest := filepath.Join(folder, "dest") - srcFolder := filepath.Join(folder, "src") - src := filepath.Join(folder, filepath.Join("src", "src")) - err = os.MkdirAll(srcFolder, 0740) - if err != nil { - t.Fatal(err) - } - err = os.MkdirAll(dest, 0740) - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(src, []byte("content"), 0777) - err = defaultCopyWithTar(src, dest+"/") - if err != nil { - t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) - } - _, err = os.Stat(dest) - if err != nil { - t.Fatalf("Destination folder should contain the source file but did not.") - } -} - -func TestTarFiles(t *testing.T) { - // TODO Windows: Figure out how to port this test. 
-	if runtime.GOOS == "windows" {
-		t.Skip("Failing on Windows")
-	}
-	// try without hardlinks
-	if err := checkNoChanges(1000, false); err != nil {
-		t.Fatal(err)
-	}
-	// try with hardlinks
-	if err := checkNoChanges(1000, true); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func checkNoChanges(fileNum int, hardlinks bool) error {
-	srcDir, err := ioutil.TempDir("", "docker-test-srcDir")
-	if err != nil {
-		return err
-	}
-	defer os.RemoveAll(srcDir)
-
-	destDir, err := ioutil.TempDir("", "docker-test-destDir")
-	if err != nil {
-		return err
-	}
-	defer os.RemoveAll(destDir)
-
-	_, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks)
-	if err != nil {
-		return err
-	}
-
-	err = defaultTarUntar(srcDir, destDir)
-	if err != nil {
-		return err
-	}
-
-	changes, err := ChangesDirs(destDir, srcDir)
-	if err != nil {
-		return err
-	}
-	if len(changes) > 0 {
-		return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes))
-	}
-	return nil
-}
-
-func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) {
-	archive, err := TarWithOptions(origin, options)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer archive.Close()
-
-	buf := make([]byte, 10)
-	if _, err := archive.Read(buf); err != nil {
-		return nil, err
-	}
-	wrap := io.MultiReader(bytes.NewReader(buf), archive)
-
-	detectedCompression := DetectCompression(buf)
-	compression := options.Compression
-	if detectedCompression.Extension() != compression.Extension() {
-		return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension())
-	}
-
-	tmp, err := ioutil.TempDir("", "docker-test-untar")
-	if err != nil {
-		return nil, err
-	}
-	defer os.RemoveAll(tmp)
-	if err := Untar(wrap, tmp, nil); err != nil {
-		return nil, err
-	}
-	if _, err := os.Stat(tmp); err != nil {
-		return nil, err
-	}
-
-	return ChangesDirs(origin, tmp)
-}
-
-func TestTarUntar(t *testing.T) {
-	// TODO Windows: Figure out how to fix this test.
- if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { - t.Fatal(err) - } - - for _, c := range []Compression{ - Uncompressed, - Gzip, - } { - changes, err := tarUntar(t, origin, &TarOptions{ - Compression: c, - ExcludePatterns: []string{"3"}, - }) - - if err != nil { - t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) - } - - if len(changes) != 1 || changes[0].Path != "/3" { - t.Fatalf("Unexpected differences after tarUntar: %v", changes) - } - } -} - -func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-tar-chown-opt") - assert.NilError(t, err) - - defer os.RemoveAll(origin) - filePath := filepath.Join(origin, "1") - err = ioutil.WriteFile(filePath, []byte("hello world"), 0700) - assert.NilError(t, err) - - idMaps := []idtools.IDMap{ - 0: { - ContainerID: 0, - HostID: 0, - Size: 65536, - }, - 1: { - ContainerID: 0, - HostID: 100000, - Size: 65536, - }, - } - - cases := []struct { - opts *TarOptions - expectedUID int - expectedGID int - }{ - {&TarOptions{ChownOpts: &idtools.IDPair{UID: 1337, GID: 42}}, 1337, 42}, - {&TarOptions{ChownOpts: &idtools.IDPair{UID: 100001, GID: 100001}, UIDMaps: idMaps, GIDMaps: idMaps}, 100001, 100001}, - {&TarOptions{ChownOpts: &idtools.IDPair{UID: 0, GID: 0}, NoLchown: false}, 0, 0}, - {&TarOptions{ChownOpts: &idtools.IDPair{UID: 1, GID: 1}, NoLchown: true}, 1, 1}, - {&TarOptions{ChownOpts: &idtools.IDPair{UID: 1000, GID: 1000}, NoLchown: true}, 1000, 1000}, - } - for _, testCase := range cases { - reader, err := TarWithOptions(filePath, testCase.opts) - assert.NilError(t, err) - tr := tar.NewReader(reader) - defer reader.Close() - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - assert.NilError(t, err) - assert.Check(t, is.Equal(hdr.Uid, testCase.expectedUID), "Uid equals expected value") - assert.Check(t, is.Equal(hdr.Gid, testCase.expectedGID), "Gid equals expected value") - } - } -} - -func TestTarWithOptions(t *testing.T) { - // TODO Windows: Figure out how to fix this test. 
- if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - t.Fatal(err) - } - if _, err := ioutil.TempDir(origin, "folder"); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { - t.Fatal(err) - } - - cases := []struct { - opts *TarOptions - numChanges int - }{ - {&TarOptions{IncludeFiles: []string{"1"}}, 2}, - {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, - {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, - {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, - {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4}, - } - for _, testCase := range cases { - changes, err := tarUntar(t, origin, testCase.opts) - if err != nil { - t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) - } - if len(changes) != testCase.numChanges { - t.Errorf("Expected %d changes, got %d for %+v:", - testCase.numChanges, len(changes), testCase.opts) - } - } -} - -// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz -// use PAX Global Extended Headers. -// Failing prevents the archives from being uncompressed during ADD -func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { - hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} - tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) - if err != nil { - t.Fatal(err) - } -} - -// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. -// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. -func TestUntarUstarGnuConflict(t *testing.T) { - f, err := os.Open("testdata/broken.tar") - if err != nil { - t.Fatal(err) - } - defer f.Close() - - found := false - tr := tar.NewReader(f) - // Iterate through the files in the archive. 
- for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - t.Fatal(err) - } - if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { - found = true - break - } - } - if !found { - t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") - } -} - -func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { - fileData := []byte("fooo") - for n := 0; n < numberOfFiles; n++ { - fileName := fmt.Sprintf("file-%d", n) - if err := ioutil.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { - return 0, err - } - if makeLinks { - if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { - return 0, err - } - } - } - totalSize := numberOfFiles * len(fileData) - return totalSize, nil -} - -func BenchmarkTarUntar(b *testing.B) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - b.Fatal(err) - } - tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") - if err != nil { - b.Fatal(err) - } - target := filepath.Join(tempDir, "dest") - n, err := prepareUntarSourceDirectory(100, origin, false) - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(origin) - defer os.RemoveAll(tempDir) - - b.ResetTimer() - b.SetBytes(int64(n)) - for n := 0; n < b.N; n++ { - err := defaultTarUntar(origin, target) - if err != nil { - b.Fatal(err) - } - os.RemoveAll(target) - } -} - -func BenchmarkTarUntarWithLinks(b *testing.B) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - b.Fatal(err) - } - tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") - if err != nil { - b.Fatal(err) - } - target := filepath.Join(tempDir, "dest") - n, err := prepareUntarSourceDirectory(100, origin, true) - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(origin) - defer os.RemoveAll(tempDir) - - b.ResetTimer() - b.SetBytes(int64(n)) - for n := 0; n < b.N; n++ { - err := defaultTarUntar(origin, target) - if err != nil { - b.Fatal(err) - } - os.RemoveAll(target) - } -} - -func TestUntarInvalidFilenames(t *testing.T) { - // TODO Windows: Figure out how to fix this test. - if runtime.GOOS == "windows" { - t.Skip("Passes but hits breakoutError: platform and architecture is not supported") - } - for i, headers := range [][]*tar.Header{ - { - { - Name: "../victim/dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { - { - // Note the leading slash - Name: "/../victim/slash-dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestUntarHardlinkToSymlink(t *testing.T) { - // TODO Windows. There may be a way of running this, but turning off for now - if runtime.GOOS == "windows" { - t.Skip("hardlinks on Windows") - } - for i, headers := range [][]*tar.Header{ - { - { - Name: "symlink1", - Typeflag: tar.TypeSymlink, - Linkname: "regfile", - Mode: 0644, - }, - { - Name: "symlink2", - Typeflag: tar.TypeLink, - Linkname: "symlink1", - Mode: 0644, - }, - { - Name: "regfile", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { - t.Fatalf("i=%d. 
%v", i, err) - } - } -} - -func TestUntarInvalidHardlink(t *testing.T) { - // TODO Windows. There may be a way of running this, but turning off for now - if runtime.GOOS == "windows" { - t.Skip("hardlinks on Windows") - } - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeLink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeLink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (hardlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try reading victim/hello (hardlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try removing victim directory (hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestUntarInvalidSymlink(t *testing.T) { - // TODO Windows. There may be a way of running this, but turning off for now - if runtime.GOOS == "windows" { - t.Skip("hardlinks on Windows") - } - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeSymlink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeSymlink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try removing victim directory (symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try writing to victim/newdir/newfile with a symlink in the path - { - // this header needs to be before the next one, or else there is an error - Name: "dir/loophole", - Typeflag: tar.TypeSymlink, - Linkname: "../../victim", - Mode: 0755, - }, - { - Name: "dir/loophole/newdir/newfile", - 
Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestTempArchiveCloseMultipleTimes(t *testing.T) { - reader := ioutil.NopCloser(strings.NewReader("hello")) - tempArchive, err := NewTempArchive(reader, "") - assert.NilError(t, err) - buf := make([]byte, 10) - n, err := tempArchive.Read(buf) - assert.NilError(t, err) - if n != 5 { - t.Fatalf("Expected to read 5 bytes. Read %d instead", n) - } - for i := 0; i < 3; i++ { - if err = tempArchive.Close(); err != nil { - t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err) - } - } -} - -func TestReplaceFileTarWrapper(t *testing.T) { - filesInArchive := 20 - testcases := []struct { - doc string - filename string - modifier TarModifierFunc - expected string - fileCount int - }{ - { - doc: "Modifier creates a new file", - filename: "newfile", - modifier: createModifier(t), - expected: "the new content", - fileCount: filesInArchive + 1, - }, - { - doc: "Modifier replaces a file", - filename: "file-2", - modifier: createOrReplaceModifier, - expected: "the new content", - fileCount: filesInArchive, - }, - { - doc: "Modifier replaces the last file", - filename: fmt.Sprintf("file-%d", filesInArchive-1), - modifier: createOrReplaceModifier, - expected: "the new content", - fileCount: filesInArchive, - }, - { - doc: "Modifier appends to a file", - filename: "file-3", - modifier: appendModifier, - expected: "fooo\nnext line", - fileCount: filesInArchive, - }, - } - - for _, testcase := range testcases { - sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) - defer cleanup() - - resultArchive := ReplaceFileTarWrapper( - sourceArchive, - map[string]TarModifierFunc{testcase.filename: testcase.modifier}) - - actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) - assert.Check(t, is.Equal(testcase.expected, actual), testcase.doc) - } -} - -// TestPrefixHeaderReadable tests that files that could be created with the -// version of this package that was built with <=go17 are still readable. 
-func TestPrefixHeaderReadable(t *testing.T) { - // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go - var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") - - tmpDir, err := ioutil.TempDir("", "prefix-test") - assert.NilError(t, err) - defer os.RemoveAll(tmpDir) - err = Untar(bytes.NewReader(testFile), tmpDir, nil) - assert.NilError(t, err) - - baseName := "foo" - pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName - - _, err = os.Lstat(filepath.Join(tmpDir, pth)) - assert.NilError(t, err) -} - -func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { - srcDir, err := ioutil.TempDir("", "docker-test-srcDir") - assert.NilError(t, err) - - _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) - assert.NilError(t, err) - - sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) - assert.NilError(t, err) - return sourceArchive, func() { - os.RemoveAll(srcDir) - sourceArchive.Close() - } -} - -func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { - return &tar.Header{ - Mode: 0600, - Typeflag: tar.TypeReg, - }, []byte("the new content"), nil -} - -func createModifier(t *testing.T) TarModifierFunc { - return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { - assert.Check(t, is.Nil(content)) - return createOrReplaceModifier(path, header, content) - } -} - -func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { - buffer := bytes.Buffer{} - if content != nil { - if _, err := buffer.ReadFrom(content); err != nil { - return nil, nil, err - } - } - buffer.WriteString("\nnext line") - return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil -} - -func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { - destDir, err := ioutil.TempDir("", "docker-test-destDir") - assert.NilError(t, err) - defer os.RemoveAll(destDir) - - err = Untar(archive, destDir, nil) - assert.NilError(t, err) - - files, _ := ioutil.ReadDir(destDir) - assert.Check(t, is.Len(files, expectedCount), doc) - - content, err := ioutil.ReadFile(filepath.Join(destDir, name)) - assert.Check(t, err) - return string(content) -} - -func TestDisablePigz(t *testing.T) { - _, err := exec.LookPath("unpigz") - if err != nil { - t.Log("Test will not check full path when Pigz not installed") - } - - os.Setenv("MOBY_DISABLE_PIGZ", "true") - defer os.Unsetenv("MOBY_DISABLE_PIGZ") - - r := testDecompressStream(t, "gz", "gzip -f") - // For the bufio pool - outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) - // For the context canceller - contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) - - assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) -} - -func TestPigz(t *testing.T) { - r := testDecompressStream(t, "gz", "gzip -f") - // For the bufio pool - outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) - // For the context canceller - contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) - - _, err := exec.LookPath("unpigz") - if 
err == nil { - t.Log("Tested whether Pigz is used, as it installed") - assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{})) - } else { - t.Log("Tested whether Pigz is not used, as it not installed") - assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) - } -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go deleted file mode 100644 index 5d13c354..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go +++ /dev/null @@ -1,315 +0,0 @@ -// +build !windows - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - "syscall" - "testing" - - "github.com/docker/docker/pkg/system" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "golang.org/x/sys/unix" -) - -func TestCanonicalTarNameForPath(t *testing.T) { - cases := []struct{ in, expected string }{ - {"foo", "foo"}, - {"foo/bar", "foo/bar"}, - {"foo/dir/", "foo/dir/"}, - } - for _, v := range cases { - if out, err := CanonicalTarNameForPath(v.in); err != nil { - t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) - } else if out != v.expected { - t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) - } - } -} - -func TestCanonicalTarName(t *testing.T) { - cases := []struct { - in string - isDir bool - expected string - }{ - {"foo", false, "foo"}, - {"foo", true, "foo/"}, - {"foo/bar", false, "foo/bar"}, - {"foo/bar", true, "foo/bar/"}, - } - for _, v := range cases { - if out, err := canonicalTarName(v.in, v.isDir); err != nil { - t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) - } else if out != v.expected { - t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) - } - } -} - -func TestChmodTarEntry(t *testing.T) { - cases := []struct { - in, expected os.FileMode - }{ - {0000, 0000}, - {0777, 0777}, - {0644, 0644}, - {0755, 0755}, - {0444, 0444}, - } - for _, v := range cases { - if out := chmodTarEntry(v.in); out != v.expected { - t.Fatalf("wrong chmod. 
expected:%v got:%v", v.expected, out) - } - } -} - -func TestTarWithHardLink(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") - assert.NilError(t, err) - defer os.RemoveAll(origin) - - err = ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700) - assert.NilError(t, err) - - err = os.Link(filepath.Join(origin, "1"), filepath.Join(origin, "2")) - assert.NilError(t, err) - - var i1, i2 uint64 - i1, err = getNlink(filepath.Join(origin, "1")) - assert.NilError(t, err) - - // sanity check that we can hardlink - if i1 != 2 { - t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) - } - - dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") - assert.NilError(t, err) - defer os.RemoveAll(dest) - - // we'll do this in two steps to separate failure - fh, err := Tar(origin, Uncompressed) - assert.NilError(t, err) - - // ensure we can read the whole thing with no error, before writing back out - buf, err := ioutil.ReadAll(fh) - assert.NilError(t, err) - - bRdr := bytes.NewReader(buf) - err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) - assert.NilError(t, err) - - i1, err = getInode(filepath.Join(dest, "1")) - assert.NilError(t, err) - - i2, err = getInode(filepath.Join(dest, "2")) - assert.NilError(t, err) - - assert.Check(t, is.Equal(i1, i2)) -} - -func TestTarWithHardLinkAndRebase(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "docker-test-tar-hardlink-rebase") - assert.NilError(t, err) - defer os.RemoveAll(tmpDir) - - origin := filepath.Join(tmpDir, "origin") - err = os.Mkdir(origin, 0700) - assert.NilError(t, err) - - err = ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700) - assert.NilError(t, err) - - err = os.Link(filepath.Join(origin, "1"), filepath.Join(origin, "2")) - assert.NilError(t, err) - - var i1, i2 uint64 - i1, err = getNlink(filepath.Join(origin, "1")) - assert.NilError(t, err) - - // sanity check that we can hardlink - if i1 != 2 { - t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) - } - - dest := filepath.Join(tmpDir, "dest") - bRdr, err := TarResourceRebase(origin, "origin") - assert.NilError(t, err) - - dstDir, srcBase := SplitPathDirEntry(origin) - _, dstBase := SplitPathDirEntry(dest) - content := RebaseArchiveEntries(bRdr, srcBase, dstBase) - err = Untar(content, dstDir, &TarOptions{Compression: Uncompressed, NoLchown: true, NoOverwriteDirNonDir: true}) - assert.NilError(t, err) - - i1, err = getInode(filepath.Join(dest, "1")) - assert.NilError(t, err) - i2, err = getInode(filepath.Join(dest, "2")) - assert.NilError(t, err) - - assert.Check(t, is.Equal(i1, i2)) -} - -func getNlink(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - statT, ok := stat.Sys().(*syscall.Stat_t) - if !ok { - return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) - } - // We need this conversion on ARM64 - return uint64(statT.Nlink), nil -} - -func getInode(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - statT, ok := stat.Sys().(*syscall.Stat_t) - if !ok { - return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) - } - return statT.Ino, nil -} - -func TestTarWithBlockCharFifo(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") - assert.NilError(t, err) - - defer os.RemoveAll(origin) - err = ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700) - 
assert.NilError(t, err) - - err = system.Mknod(filepath.Join(origin, "2"), unix.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))) - assert.NilError(t, err) - err = system.Mknod(filepath.Join(origin, "3"), unix.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))) - assert.NilError(t, err) - err = system.Mknod(filepath.Join(origin, "4"), unix.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))) - assert.NilError(t, err) - - dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") - assert.NilError(t, err) - defer os.RemoveAll(dest) - - // we'll do this in two steps to separate failure - fh, err := Tar(origin, Uncompressed) - assert.NilError(t, err) - - // ensure we can read the whole thing with no error, before writing back out - buf, err := ioutil.ReadAll(fh) - assert.NilError(t, err) - - bRdr := bytes.NewReader(buf) - err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) - assert.NilError(t, err) - - changes, err := ChangesDirs(origin, dest) - assert.NilError(t, err) - - if len(changes) > 0 { - t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes) - } -} - -// TestTarUntarWithXattr is Unix as Lsetxattr is not supported on Windows -func TestTarUntarWithXattr(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - assert.NilError(t, err) - defer os.RemoveAll(origin) - err = ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700) - assert.NilError(t, err) - - err = ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700) - assert.NilError(t, err) - err = ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700) - assert.NilError(t, err) - err = system.Lsetxattr(filepath.Join(origin, "2"), "security.capability", []byte{0x00}, 0) - assert.NilError(t, err) - - for _, c := range []Compression{ - Uncompressed, - Gzip, - } { - changes, err := tarUntar(t, origin, &TarOptions{ - Compression: c, - ExcludePatterns: []string{"3"}, - }) - - if err != nil { - t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) - } - - if len(changes) != 1 || changes[0].Path != "/3" { - t.Fatalf("Unexpected differences after tarUntar: %v", changes) - } - capability, _ := system.Lgetxattr(filepath.Join(origin, "2"), "security.capability") - if capability == nil && capability[0] != 0x00 { - t.Fatalf("Untar should have kept the 'security.capability' xattr.") - } - } -} - -func TestCopyInfoDestinationPathSymlink(t *testing.T) { - tmpDir, _ := getTestTempDirs(t) - defer removeAllPaths(tmpDir) - - root := strings.TrimRight(tmpDir, "/") + "/" - - type FileTestData struct { - resource FileData - file string - expected CopyInfo - } - - testData := []FileTestData{ - //Create a directory: /tmp/archive-copy-test*/dir1 - //Test will "copy" file1 to dir1 - {resource: FileData{filetype: Dir, path: "dir1", permissions: 0740}, file: "file1", expected: CopyInfo{Path: root + "dir1/file1", Exists: false, IsDir: false}}, - - //Create a symlink directory to dir1: /tmp/archive-copy-test*/dirSymlink -> dir1 - //Test will "copy" file2 to dirSymlink - {resource: FileData{filetype: Symlink, path: "dirSymlink", contents: root + "dir1", permissions: 0600}, file: "file2", expected: CopyInfo{Path: root + "dirSymlink/file2", Exists: false, IsDir: false}}, - - //Create a file in tmp directory: /tmp/archive-copy-test*/file1 - //Test to cover when the full file path already exists. 
- {resource: FileData{filetype: Regular, path: "file1", permissions: 0600}, file: "", expected: CopyInfo{Path: root + "file1", Exists: true}}, - - //Create a directory: /tmp/archive-copy*/dir2 - //Test to cover when the full directory path already exists - {resource: FileData{filetype: Dir, path: "dir2", permissions: 0740}, file: "", expected: CopyInfo{Path: root + "dir2", Exists: true, IsDir: true}}, - - //Create a symlink to a non-existent target: /tmp/archive-copy*/symlink1 -> noSuchTarget - //Negative test to cover symlinking to a target that does not exit - {resource: FileData{filetype: Symlink, path: "symlink1", contents: "noSuchTarget", permissions: 0600}, file: "", expected: CopyInfo{Path: root + "noSuchTarget", Exists: false}}, - - //Create a file in tmp directory for next test: /tmp/existingfile - {resource: FileData{filetype: Regular, path: "existingfile", permissions: 0600}, file: "", expected: CopyInfo{Path: root + "existingfile", Exists: true}}, - - //Create a symlink to an existing file: /tmp/archive-copy*/symlink2 -> /tmp/existingfile - //Test to cover when the parent directory of a new file is a symlink - {resource: FileData{filetype: Symlink, path: "symlink2", contents: "existingfile", permissions: 0600}, file: "", expected: CopyInfo{Path: root + "existingfile", Exists: true}}, - } - - var dirs []FileData - for _, data := range testData { - dirs = append(dirs, data.resource) - } - provisionSampleDir(t, tmpDir, dirs) - - for _, info := range testData { - p := filepath.Join(tmpDir, info.resource.path, info.file) - ci, err := CopyInfoDestinationPath(p) - assert.Check(t, err) - assert.Check(t, is.DeepEqual(info.expected, ci)) - } -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go deleted file mode 100644 index b3dbb327..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go +++ /dev/null @@ -1,93 +0,0 @@ -// +build windows - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -func TestCopyFileWithInvalidDest(t *testing.T) { - // TODO Windows: This is currently failing. Not sure what has - // recently changed in CopyWithTar as used to pass. Further investigation - // is required. - t.Skip("Currently fails") - folder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(folder) - dest := "c:dest" - srcFolder := filepath.Join(folder, "src") - src := filepath.Join(folder, "src", "src") - err = os.MkdirAll(srcFolder, 0740) - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(src, []byte("content"), 0777) - err = defaultCopyWithTar(src, dest) - if err == nil { - t.Fatalf("archiver.CopyWithTar should throw an error on invalid dest.") - } -} - -func TestCanonicalTarNameForPath(t *testing.T) { - cases := []struct { - in, expected string - shouldFail bool - }{ - {"foo", "foo", false}, - {"foo/bar", "___", true}, // unix-styled windows path must fail - {`foo\bar`, "foo/bar", false}, - } - for _, v := range cases { - if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail { - t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) - } else if v.shouldFail && err == nil { - t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out) - } else if !v.shouldFail && out != v.expected { - t.Fatalf("wrong canonical tar name. 
expected:%s got:%s", v.expected, out) - } - } -} - -func TestCanonicalTarName(t *testing.T) { - cases := []struct { - in string - isDir bool - expected string - }{ - {"foo", false, "foo"}, - {"foo", true, "foo/"}, - {`foo\bar`, false, "foo/bar"}, - {`foo\bar`, true, "foo/bar/"}, - } - for _, v := range cases { - if out, err := canonicalTarName(v.in, v.isDir); err != nil { - t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) - } else if out != v.expected { - t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) - } - } -} - -func TestChmodTarEntry(t *testing.T) { - cases := []struct { - in, expected os.FileMode - }{ - {0000, 0111}, - {0777, 0755}, - {0644, 0755}, - {0755, 0755}, - {0444, 0555}, - {0755 | os.ModeDir, 0755 | os.ModeDir}, - {0755 | os.ModeSymlink, 0755 | os.ModeSymlink}, - } - for _, v := range cases { - if out := chmodTarEntry(v.in); out != v.expected { - t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out) - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go deleted file mode 100644 index 43734db5..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes.go +++ /dev/null @@ -1,441 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strings" - "syscall" - "time" - - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" -) - -// ChangeType represents the change type. -type ChangeType int - -const ( - // ChangeModify represents the modify operation. - ChangeModify = iota - // ChangeAdd represents the add operation. - ChangeAdd - // ChangeDelete represents the delete operation. - ChangeDelete -) - -func (c ChangeType) String() string { - switch c { - case ChangeModify: - return "C" - case ChangeAdd: - return "A" - case ChangeDelete: - return "D" - } - return "" -} - -// Change represents a change, it wraps the change type and path. -// It describes changes of the files in the path respect to the -// parent layers. The change could be modify, add, delete. -// This is used for layer diff. 
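A minimal usage sketch, assuming the vendored import path github.com/docker/docker/pkg/archive that this hunk removes and hypothetical layer directories: the Change/ChangeType pair defined below is what Changes and ChangesDirs return, and Change.String renders the same one-letter prefixes that `docker diff` prints.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Compare a container's writable layer against its read-only parent layer.
	// Both paths are placeholders.
	changes, err := archive.Changes([]string{"/layers/parent"}, "/layers/rw")
	if err != nil {
		panic(err)
	}
	for _, c := range changes {
		// Prints e.g. "C /etc", "A /etc/hosts", "D /tmp/old" (C=modify, A=add, D=delete).
		fmt.Println(c.String())
	}
}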
-type Change struct { - Path string - Kind ChangeType -} - -func (change *Change) String() string { - return fmt.Sprintf("%s %s", change.Kind, change.Path) -} - -// for sort.Sort -type changesByPath []Change - -func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } -func (c changesByPath) Len() int { return len(c) } -func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } - -// Gnu tar and the go tar writer don't have sub-second mtime -// precision, which is problematic when we apply changes via tar -// files, we handle this by comparing for exact times, *or* same -// second count and either a or b having exactly 0 nanoseconds -func sameFsTime(a, b time.Time) bool { - return a == b || - (a.Unix() == b.Unix() && - (a.Nanosecond() == 0 || b.Nanosecond() == 0)) -} - -func sameFsTimeSpec(a, b syscall.Timespec) bool { - return a.Sec == b.Sec && - (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) -} - -// Changes walks the path rw and determines changes for the files in the path, -// with respect to the parent layers -func Changes(layers []string, rw string) ([]Change, error) { - return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) -} - -func aufsMetadataSkip(path string) (skip bool, err error) { - skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) - if err != nil { - skip = true - } - return -} - -func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { - f := filepath.Base(path) - - // If there is a whiteout, then the file was removed - if strings.HasPrefix(f, WhiteoutPrefix) { - originalFile := f[len(WhiteoutPrefix):] - return filepath.Join(filepath.Dir(path), originalFile), nil - } - - return "", nil -} - -type skipChange func(string) (bool, error) -type deleteChange func(string, string, os.FileInfo) (string, error) - -func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { - var ( - changes []Change - changedDirs = make(map[string]struct{}) - ) - - err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - path, err = filepath.Rel(rw, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - path = filepath.Join(string(os.PathSeparator), path) - - // Skip root - if path == string(os.PathSeparator) { - return nil - } - - if sc != nil { - if skip, err := sc(path); skip { - return err - } - } - - change := Change{ - Path: path, - } - - deletedFile, err := dc(rw, path, f) - if err != nil { - return err - } - - // Find out what kind of modification happened - if deletedFile != "" { - change.Path = deletedFile - change.Kind = ChangeDelete - } else { - // Otherwise, the file was added - change.Kind = ChangeAdd - - // ...Unless it already existed in a top layer, in which case, it's a modification - for _, layer := range layers { - stat, err := os.Stat(filepath.Join(layer, path)) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - // The file existed in the top layer, so that's a modification - - // However, if it's a directory, maybe it wasn't actually modified. 
- // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar - if stat.IsDir() && f.IsDir() { - if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { - // Both directories are the same, don't record the change - return nil - } - } - change.Kind = ChangeModify - break - } - } - } - - // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. - // This block is here to ensure the change is recorded even if the - // modify time, mode and size of the parent directory in the rw and ro layers are all equal. - // Check https://github.com/docker/docker/pull/13590 for details. - if f.IsDir() { - changedDirs[path] = struct{}{} - } - if change.Kind == ChangeAdd || change.Kind == ChangeDelete { - parent := filepath.Dir(path) - if _, ok := changedDirs[parent]; !ok && parent != "/" { - changes = append(changes, Change{Path: parent, Kind: ChangeModify}) - changedDirs[parent] = struct{}{} - } - } - - // Record change - changes = append(changes, change) - return nil - }) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - return changes, nil -} - -// FileInfo describes the information of a file. -type FileInfo struct { - parent *FileInfo - name string - stat *system.StatT - children map[string]*FileInfo - capability []byte - added bool -} - -// LookUp looks up the file information of a file. -func (info *FileInfo) LookUp(path string) *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - parent := info - if path == string(os.PathSeparator) { - return info - } - - pathElements := strings.Split(path, string(os.PathSeparator)) - for _, elem := range pathElements { - if elem != "" { - child := parent.children[elem] - if child == nil { - return nil - } - parent = child - } - } - return parent -} - -func (info *FileInfo) path() string { - if info.parent == nil { - // As this runs on the daemon side, file paths are OS specific. - return string(os.PathSeparator) - } - return filepath.Join(info.parent.path(), info.name) -} - -func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { - - sizeAtEntry := len(*changes) - - if oldInfo == nil { - // add - change := Change{ - Path: info.path(), - Kind: ChangeAdd, - } - *changes = append(*changes, change) - info.added = true - } - - // We make a copy so we can modify it to detect additions - // also, we only recurse on the old dir if the new info is a directory - // otherwise any previous delete/change is considered recursive - oldChildren := make(map[string]*FileInfo) - if oldInfo != nil && info.isDir() { - for k, v := range oldInfo.children { - oldChildren[k] = v - } - } - - for name, newChild := range info.children { - oldChild := oldChildren[name] - if oldChild != nil { - // change? - oldStat := oldChild.stat - newStat := newChild.stat - // Note: We can't compare inode or ctime or blocksize here, because these change - // when copying a file into a container. However, that is not generally a problem - // because any content change will change mtime, and any status change should - // be visible when actually comparing the stat fields. 
The only time this - // breaks down is if some code intentionally hides a change by setting - // back mtime - if statDifferent(oldStat, newStat) || - !bytes.Equal(oldChild.capability, newChild.capability) { - change := Change{ - Path: newChild.path(), - Kind: ChangeModify, - } - *changes = append(*changes, change) - newChild.added = true - } - - // Remove from copy so we can detect deletions - delete(oldChildren, name) - } - - newChild.addChanges(oldChild, changes) - } - for _, oldChild := range oldChildren { - // delete - change := Change{ - Path: oldChild.path(), - Kind: ChangeDelete, - } - *changes = append(*changes, change) - } - - // If there were changes inside this directory, we need to add it, even if the directory - // itself wasn't changed. This is needed to properly save and restore filesystem permissions. - // As this runs on the daemon side, file paths are OS specific. - if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { - change := Change{ - Path: info.path(), - Kind: ChangeModify, - } - // Let's insert the directory entry before the recently added entries located inside this dir - *changes = append(*changes, change) // just to resize the slice, will be overwritten - copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) - (*changes)[sizeAtEntry] = change - } - -} - -// Changes add changes to file information. -func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { - var changes []Change - - info.addChanges(oldInfo, &changes) - - return changes -} - -func newRootFileInfo() *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - root := &FileInfo{ - name: string(os.PathSeparator), - children: make(map[string]*FileInfo), - } - return root -} - -// ChangesDirs compares two directories and generates an array of Change objects describing the changes. -// If oldDir is "", then all files in newDir will be Add-Changes. -func ChangesDirs(newDir, oldDir string) ([]Change, error) { - var ( - oldRoot, newRoot *FileInfo - ) - if oldDir == "" { - emptyDir, err := ioutil.TempDir("", "empty") - if err != nil { - return nil, err - } - defer os.Remove(emptyDir) - oldDir = emptyDir - } - oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) - if err != nil { - return nil, err - } - - return newRoot.Changes(oldRoot), nil -} - -// ChangesSize calculates the size in bytes of the provided changes, based on newDir. -func ChangesSize(newDir string, changes []Change) int64 { - var ( - size int64 - sf = make(map[uint64]struct{}) - ) - for _, change := range changes { - if change.Kind == ChangeModify || change.Kind == ChangeAdd { - file := filepath.Join(newDir, change.Path) - fileInfo, err := os.Lstat(file) - if err != nil { - logrus.Errorf("Can not stat %q: %s", file, err) - continue - } - - if fileInfo != nil && !fileInfo.IsDir() { - if hasHardlinks(fileInfo) { - inode := getIno(fileInfo) - if _, ok := sf[inode]; !ok { - size += fileInfo.Size() - sf[inode] = struct{}{} - } - } else { - size += fileInfo.Size() - } - } - } - } - return size -} - -// ExportChanges produces an Archive from the provided changes, relative to dir. 
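A minimal sketch of the export-and-replay pipeline that ExportChanges anchors, mirroring the flow exercised by TestApplyLayer in the deleted changes_test.go further down. The import path is the vendored one being removed, the directories are hypothetical, and ApplyLayer lives in the same package's diff.go, which is not part of this hunk.

package main

import (
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Hypothetical directories: newDir is a mutated copy of oldDir.
	const oldDir, newDir = "/tmp/oldDir", "/tmp/newDir"

	// 1. Compute the changes that turn oldDir into newDir.
	changes, err := archive.ChangesDirs(newDir, oldDir)
	if err != nil {
		log.Fatal(err)
	}

	// 2. Serialize them as a layer tar; deletions become ".wh." whiteout entries.
	layer, err := archive.ExportChanges(newDir, changes, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()

	// 3. Replaying the layer onto oldDir should reproduce newDir.
	if _, err := archive.ApplyLayer(oldDir, layer); err != nil {
		log.Fatal(err)
	}
}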
-func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { - reader, writer := io.Pipe() - go func() { - ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) - - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - sort.Sort(changesByPath(changes)) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - for _, change := range changes { - if change.Kind == ChangeDelete { - whiteOutDir := filepath.Dir(change.Path) - whiteOutBase := filepath.Base(change.Path) - whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) - timestamp := time.Now() - hdr := &tar.Header{ - Name: whiteOut[1:], - Size: 0, - ModTime: timestamp, - AccessTime: timestamp, - ChangeTime: timestamp, - } - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - logrus.Debugf("Can't write whiteout header: %s", err) - } - } else { - path := filepath.Join(dir, change.Path) - if err := ta.addTarFile(path, change.Path[1:]); err != nil { - logrus.Debugf("Can't add file %s to tar: %s", path, err) - } - } - } - - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - logrus.Debugf("Can't close layer: %s", err) - } - if err := writer.Close(); err != nil { - logrus.Debugf("failed close Changes writer: %s", err) - } - }() - return reader, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go deleted file mode 100644 index 78a5393c..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go +++ /dev/null @@ -1,313 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "sort" - "syscall" - "unsafe" - - "github.com/docker/docker/pkg/system" - "golang.org/x/sys/unix" -) - -// walker is used to implement collectFileInfoForChanges on linux. Where this -// method in general returns the entire contents of two directory trees, we -// optimize some FS calls out on linux. In particular, we take advantage of the -// fact that getdents(2) returns the inode of each file in the directory being -// walked, which, when walking two trees in parallel to generate a list of -// changes, can be used to prune subtrees without ever having to lstat(2) them -// directly. Eliminating stat calls in this way can save up to seconds on large -// images. -type walker struct { - dir1 string - dir2 string - root1 *FileInfo - root2 *FileInfo -} - -// collectFileInfoForChanges returns a complete representation of the trees -// rooted at dir1 and dir2, with one important exception: any subtree or -// leaf where the inode and device numbers are an exact match between dir1 -// and dir2 will be pruned from the results. This method is *only* to be used -// to generating a list of changes between the two directories, as it does not -// reflect the full contents. 
-func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { - w := &walker{ - dir1: dir1, - dir2: dir2, - root1: newRootFileInfo(), - root2: newRootFileInfo(), - } - - i1, err := os.Lstat(w.dir1) - if err != nil { - return nil, nil, err - } - i2, err := os.Lstat(w.dir2) - if err != nil { - return nil, nil, err - } - - if err := w.walk("/", i1, i2); err != nil { - return nil, nil, err - } - - return w.root1, w.root2, nil -} - -// Given a FileInfo, its path info, and a reference to the root of the tree -// being constructed, register this file with the tree. -func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { - if fi == nil { - return nil - } - parent := root.LookUp(filepath.Dir(path)) - if parent == nil { - return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) - } - info := &FileInfo{ - name: filepath.Base(path), - children: make(map[string]*FileInfo), - parent: parent, - } - cpath := filepath.Join(dir, path) - stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) - if err != nil { - return err - } - info.stat = stat - info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access - parent.children[info.name] = info - return nil -} - -// Walk a subtree rooted at the same path in both trees being iterated. For -// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d -func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { - // Register these nodes with the return trees, unless we're still at the - // (already-created) roots: - if path != "/" { - if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { - return err - } - if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { - return err - } - } - - is1Dir := i1 != nil && i1.IsDir() - is2Dir := i2 != nil && i2.IsDir() - - sameDevice := false - if i1 != nil && i2 != nil { - si1 := i1.Sys().(*syscall.Stat_t) - si2 := i2.Sys().(*syscall.Stat_t) - if si1.Dev == si2.Dev { - sameDevice = true - } - } - - // If these files are both non-existent, or leaves (non-dirs), we are done. - if !is1Dir && !is2Dir { - return nil - } - - // Fetch the names of all the files contained in both directories being walked: - var names1, names2 []nameIno - if is1Dir { - names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access - if err != nil { - return err - } - } - if is2Dir { - names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access - if err != nil { - return err - } - } - - // We have lists of the files contained in both parallel directories, sorted - // in the same order. Walk them in parallel, generating a unique merged list - // of all items present in either or both directories. 
- var names []string - ix1 := 0 - ix2 := 0 - - for { - if ix1 >= len(names1) { - break - } - if ix2 >= len(names2) { - break - } - - ni1 := names1[ix1] - ni2 := names2[ix2] - - switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { - case -1: // ni1 < ni2 -- advance ni1 - // we will not encounter ni1 in names2 - names = append(names, ni1.name) - ix1++ - case 0: // ni1 == ni2 - if ni1.ino != ni2.ino || !sameDevice { - names = append(names, ni1.name) - } - ix1++ - ix2++ - case 1: // ni1 > ni2 -- advance ni2 - // we will not encounter ni2 in names1 - names = append(names, ni2.name) - ix2++ - } - } - for ix1 < len(names1) { - names = append(names, names1[ix1].name) - ix1++ - } - for ix2 < len(names2) { - names = append(names, names2[ix2].name) - ix2++ - } - - // For each of the names present in either or both of the directories being - // iterated, stat the name under each root, and recurse the pair of them: - for _, name := range names { - fname := filepath.Join(path, name) - var cInfo1, cInfo2 os.FileInfo - if is1Dir { - cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if is2Dir { - cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if err = w.walk(fname, cInfo1, cInfo2); err != nil { - return err - } - } - return nil -} - -// {name,inode} pairs used to support the early-pruning logic of the walker type -type nameIno struct { - name string - ino uint64 -} - -type nameInoSlice []nameIno - -func (s nameInoSlice) Len() int { return len(s) } -func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } - -// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode -// numbers further up the stack when reading directory contents. Unlike -// os.Readdirnames, which returns a list of filenames, this function returns a -// list of {filename,inode} pairs. -func readdirnames(dirname string) (names []nameIno, err error) { - var ( - size = 100 - buf = make([]byte, 4096) - nbuf int - bufp int - nb int - ) - - f, err := os.Open(dirname) - if err != nil { - return nil, err - } - defer f.Close() - - names = make([]nameIno, 0, size) // Empty with room to grow. - for { - // Refill the buffer if necessary - if bufp >= nbuf { - bufp = 0 - nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux - if nbuf < 0 { - nbuf = 0 - } - if err != nil { - return nil, os.NewSyscallError("readdirent", err) - } - if nbuf <= 0 { - break // EOF - } - } - - // Drain the buffer - nb, names = parseDirent(buf[bufp:nbuf], names) - bufp += nb - } - - sl := nameInoSlice(names) - sort.Sort(sl) - return sl, nil -} - -// parseDirent is a minor modification of unix.ParseDirent (linux version) -// which returns {name,inode} pairs instead of just names. -func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { - origlen := len(buf) - for len(buf) > 0 { - dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) - buf = buf[dirent.Reclen:] - if dirent.Ino == 0 { // File absent in directory. - continue - } - bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) - var name = string(bytes[0:clen(bytes[:])]) - if name == "." || name == ".." 
{ // Useless names - continue - } - names = append(names, nameIno{name, dirent.Ino}) - } - return origlen - len(buf), names -} - -func clen(n []byte) int { - for i := 0; i < len(n); i++ { - if n[i] == 0 { - return i - } - } - return len(n) -} - -// OverlayChanges walks the path rw and determines changes for the files in the path, -// with respect to the parent layers -func OverlayChanges(layers []string, rw string) ([]Change, error) { - return changes(layers, rw, overlayDeletedFile, nil) -} - -func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { - if fi.Mode()&os.ModeCharDevice != 0 { - s := fi.Sys().(*syscall.Stat_t) - if unix.Major(uint64(s.Rdev)) == 0 && unix.Minor(uint64(s.Rdev)) == 0 { // nolint: unconvert - return path, nil - } - } - if fi.Mode()&os.ModeDir != 0 { - opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque") - if err != nil { - return "", err - } - if len(opaque) == 1 && opaque[0] == 'y' { - return path, nil - } - } - - return "", nil - -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go deleted file mode 100644 index ba744741..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_other.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build !linux - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/docker/pkg/system" -) - -func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { - var ( - oldRoot, newRoot *FileInfo - err1, err2 error - errs = make(chan error, 2) - ) - go func() { - oldRoot, err1 = collectFileInfo(oldDir) - errs <- err1 - }() - go func() { - newRoot, err2 = collectFileInfo(newDir) - errs <- err2 - }() - - // block until both routines have returned - for i := 0; i < 2; i++ { - if err := <-errs; err != nil { - return nil, nil, err - } - } - - return oldRoot, newRoot, nil -} - -func collectFileInfo(sourceDir string) (*FileInfo, error) { - root := newRootFileInfo() - - err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - relPath, err := filepath.Rel(sourceDir, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - relPath = filepath.Join(string(os.PathSeparator), relPath) - - // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. - // Temporary workaround. If the returned path starts with two backslashes, - // trim it down to a single backslash. Only relevant on Windows. 
- if runtime.GOOS == "windows" { - if strings.HasPrefix(relPath, `\\`) { - relPath = relPath[1:] - } - } - - if relPath == string(os.PathSeparator) { - return nil - } - - parent := root.LookUp(filepath.Dir(relPath)) - if parent == nil { - return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) - } - - info := &FileInfo{ - name: filepath.Base(relPath), - children: make(map[string]*FileInfo), - parent: parent, - } - - s, err := system.Lstat(path) - if err != nil { - return err - } - info.stat = s - - info.capability, _ = system.Lgetxattr(path, "security.capability") - - parent.children[info.name] = info - - return nil - }) - if err != nil { - return nil, err - } - return root, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go b/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go deleted file mode 100644 index 76ee40bb..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "sort" - "testing" -) - -func TestHardLinkOrder(t *testing.T) { - names := []string{"file1.txt", "file2.txt", "file3.txt"} - msg := []byte("Hey y'all") - - // Create dir - src, err := ioutil.TempDir("", "docker-hardlink-test-src-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(src) - for _, name := range names { - func() { - fh, err := os.Create(path.Join(src, name)) - if err != nil { - t.Fatal(err) - } - defer fh.Close() - if _, err = fh.Write(msg); err != nil { - t.Fatal(err) - } - }() - } - // Create dest, with changes that includes hardlinks - dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-") - if err != nil { - t.Fatal(err) - } - os.RemoveAll(dest) // we just want the name, at first - if err := copyDir(src, dest); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dest) - for _, name := range names { - for i := 0; i < 5; i++ { - if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil { - t.Fatal(err) - } - } - } - - // get changes - changes, err := ChangesDirs(dest, src) - if err != nil { - t.Fatal(err) - } - - // sort - sort.Sort(changesByPath(changes)) - - // ExportChanges - ar, err := ExportChanges(dest, changes, nil, nil) - if err != nil { - t.Fatal(err) - } - hdrs, err := walkHeaders(ar) - if err != nil { - t.Fatal(err) - } - - // reverse sort - sort.Sort(sort.Reverse(changesByPath(changes))) - // ExportChanges - arRev, err := ExportChanges(dest, changes, nil, nil) - if err != nil { - t.Fatal(err) - } - hdrsRev, err := walkHeaders(arRev) - if err != nil { - t.Fatal(err) - } - - // line up the two sets - sort.Sort(tarHeaders(hdrs)) - sort.Sort(tarHeaders(hdrsRev)) - - // compare Size and LinkName - for i := range hdrs { - if hdrs[i].Name != hdrsRev[i].Name { - t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name) - } - if hdrs[i].Size != hdrsRev[i].Size { - t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size) - } - if hdrs[i].Typeflag != hdrsRev[i].Typeflag { - t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag) - } - if hdrs[i].Linkname != hdrsRev[i].Linkname { - t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, hdrsRev[i].Linkname) - } - } - -} - -type tarHeaders []tar.Header - -func (th tarHeaders) Len() 
int { return len(th) } -func (th tarHeaders) Swap(i, j int) { th[j], th[i] = th[i], th[j] } -func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name } - -func walkHeaders(r io.Reader) ([]tar.Header, error) { - t := tar.NewReader(r) - headers := []tar.Header{} - for { - hdr, err := t.Next() - if err != nil { - if err == io.EOF { - break - } - return headers, err - } - headers = append(headers, *hdr) - } - return headers, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_test.go b/vendor/github.com/docker/docker/pkg/archive/changes_test.go deleted file mode 100644 index 2d316e77..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_test.go +++ /dev/null @@ -1,502 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "io/ioutil" - "os" - "os/exec" - "path" - "runtime" - "sort" - "testing" - "time" - - "github.com/docker/docker/pkg/system" - "github.com/gotestyourself/gotestyourself/assert" -) - -func max(x, y int) int { - if x >= y { - return x - } - return y -} - -func copyDir(src, dst string) error { - return exec.Command("cp", "-a", src, dst).Run() -} - -type FileType uint32 - -const ( - Regular FileType = iota - Dir - Symlink -) - -type FileData struct { - filetype FileType - path string - contents string - permissions os.FileMode -} - -func createSampleDir(t *testing.T, root string) { - files := []FileData{ - {filetype: Regular, path: "file1", contents: "file1\n", permissions: 0600}, - {filetype: Regular, path: "file2", contents: "file2\n", permissions: 0666}, - {filetype: Regular, path: "file3", contents: "file3\n", permissions: 0404}, - {filetype: Regular, path: "file4", contents: "file4\n", permissions: 0600}, - {filetype: Regular, path: "file5", contents: "file5\n", permissions: 0600}, - {filetype: Regular, path: "file6", contents: "file6\n", permissions: 0600}, - {filetype: Regular, path: "file7", contents: "file7\n", permissions: 0600}, - {filetype: Dir, path: "dir1", contents: "", permissions: 0740}, - {filetype: Regular, path: "dir1/file1-1", contents: "file1-1\n", permissions: 01444}, - {filetype: Regular, path: "dir1/file1-2", contents: "file1-2\n", permissions: 0666}, - {filetype: Dir, path: "dir2", contents: "", permissions: 0700}, - {filetype: Regular, path: "dir2/file2-1", contents: "file2-1\n", permissions: 0666}, - {filetype: Regular, path: "dir2/file2-2", contents: "file2-2\n", permissions: 0666}, - {filetype: Dir, path: "dir3", contents: "", permissions: 0700}, - {filetype: Regular, path: "dir3/file3-1", contents: "file3-1\n", permissions: 0666}, - {filetype: Regular, path: "dir3/file3-2", contents: "file3-2\n", permissions: 0666}, - {filetype: Dir, path: "dir4", contents: "", permissions: 0700}, - {filetype: Regular, path: "dir4/file3-1", contents: "file4-1\n", permissions: 0666}, - {filetype: Regular, path: "dir4/file3-2", contents: "file4-2\n", permissions: 0666}, - {filetype: Symlink, path: "symlink1", contents: "target1", permissions: 0666}, - {filetype: Symlink, path: "symlink2", contents: "target2", permissions: 0666}, - {filetype: Symlink, path: "symlink3", contents: root + "/file1", permissions: 0666}, - {filetype: Symlink, path: "symlink4", contents: root + "/symlink3", permissions: 0666}, - {filetype: Symlink, path: "dirSymlink", contents: root + "/dir1", permissions: 0740}, - } - provisionSampleDir(t, root, files) -} - -func provisionSampleDir(t *testing.T, root string, files []FileData) { - now := time.Now() - for _, info := range files { - p := path.Join(root, 
info.path) - if info.filetype == Dir { - err := os.MkdirAll(p, info.permissions) - assert.NilError(t, err) - } else if info.filetype == Regular { - err := ioutil.WriteFile(p, []byte(info.contents), info.permissions) - assert.NilError(t, err) - } else if info.filetype == Symlink { - err := os.Symlink(info.contents, p) - assert.NilError(t, err) - } - - if info.filetype != Symlink { - // Set a consistent ctime, atime for all files and dirs - err := system.Chtimes(p, now, now) - assert.NilError(t, err) - } - } -} - -func TestChangeString(t *testing.T) { - modifyChange := Change{"change", ChangeModify} - toString := modifyChange.String() - if toString != "C change" { - t.Fatalf("String() of a change with ChangeModify Kind should have been %s but was %s", "C change", toString) - } - addChange := Change{"change", ChangeAdd} - toString = addChange.String() - if toString != "A change" { - t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", toString) - } - deleteChange := Change{"change", ChangeDelete} - toString = deleteChange.String() - if toString != "D change" { - t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString) - } -} - -func TestChangesWithNoChanges(t *testing.T) { - // TODO Windows. There may be a way of running this, but turning off for now - // as createSampleDir uses symlinks. - if runtime.GOOS == "windows" { - t.Skip("symlinks on Windows") - } - rwLayer, err := ioutil.TempDir("", "docker-changes-test") - assert.NilError(t, err) - defer os.RemoveAll(rwLayer) - layer, err := ioutil.TempDir("", "docker-changes-test-layer") - assert.NilError(t, err) - defer os.RemoveAll(layer) - createSampleDir(t, layer) - changes, err := Changes([]string{layer}, rwLayer) - assert.NilError(t, err) - if len(changes) != 0 { - t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes)) - } -} - -func TestChangesWithChanges(t *testing.T) { - // TODO Windows. There may be a way of running this, but turning off for now - // as createSampleDir uses symlinks. - if runtime.GOOS == "windows" { - t.Skip("symlinks on Windows") - } - // Mock the readonly layer - layer, err := ioutil.TempDir("", "docker-changes-test-layer") - assert.NilError(t, err) - defer os.RemoveAll(layer) - createSampleDir(t, layer) - os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740) - - // Mock the RW layer - rwLayer, err := ioutil.TempDir("", "docker-changes-test") - assert.NilError(t, err) - defer os.RemoveAll(rwLayer) - - // Create a folder in RW layer - dir1 := path.Join(rwLayer, "dir1") - os.MkdirAll(dir1, 0740) - deletedFile := path.Join(dir1, ".wh.file1-2") - ioutil.WriteFile(deletedFile, []byte{}, 0600) - modifiedFile := path.Join(dir1, "file1-1") - ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444) - // Let's add a subfolder for a newFile - subfolder := path.Join(dir1, "subfolder") - os.MkdirAll(subfolder, 0740) - newFile := path.Join(subfolder, "newFile") - ioutil.WriteFile(newFile, []byte{}, 0740) - - changes, err := Changes([]string{layer}, rwLayer) - assert.NilError(t, err) - - expectedChanges := []Change{ - {"/dir1", ChangeModify}, - {"/dir1/file1-1", ChangeModify}, - {"/dir1/file1-2", ChangeDelete}, - {"/dir1/subfolder", ChangeModify}, - {"/dir1/subfolder/newFile", ChangeAdd}, - } - checkChanges(expectedChanges, changes, t) -} - -// See https://github.com/docker/docker/pull/13590 -func TestChangesWithChangesGH13590(t *testing.T) { - // TODO Windows. 
There may be a way of running this, but turning off for now - // as createSampleDir uses symlinks. - if runtime.GOOS == "windows" { - t.Skip("symlinks on Windows") - } - baseLayer, err := ioutil.TempDir("", "docker-changes-test.") - assert.NilError(t, err) - defer os.RemoveAll(baseLayer) - - dir3 := path.Join(baseLayer, "dir1/dir2/dir3") - os.MkdirAll(dir3, 07400) - - file := path.Join(dir3, "file.txt") - ioutil.WriteFile(file, []byte("hello"), 0666) - - layer, err := ioutil.TempDir("", "docker-changes-test2.") - assert.NilError(t, err) - defer os.RemoveAll(layer) - - // Test creating a new file - if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { - t.Fatalf("Cmd failed: %q", err) - } - - os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt")) - file = path.Join(layer, "dir1/dir2/dir3/file1.txt") - ioutil.WriteFile(file, []byte("bye"), 0666) - - changes, err := Changes([]string{baseLayer}, layer) - assert.NilError(t, err) - - expectedChanges := []Change{ - {"/dir1/dir2/dir3", ChangeModify}, - {"/dir1/dir2/dir3/file1.txt", ChangeAdd}, - } - checkChanges(expectedChanges, changes, t) - - // Now test changing a file - layer, err = ioutil.TempDir("", "docker-changes-test3.") - assert.NilError(t, err) - defer os.RemoveAll(layer) - - if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { - t.Fatalf("Cmd failed: %q", err) - } - - file = path.Join(layer, "dir1/dir2/dir3/file.txt") - ioutil.WriteFile(file, []byte("bye"), 0666) - - changes, err = Changes([]string{baseLayer}, layer) - assert.NilError(t, err) - - expectedChanges = []Change{ - {"/dir1/dir2/dir3/file.txt", ChangeModify}, - } - checkChanges(expectedChanges, changes, t) -} - -// Create a directory, copy it, make sure we report no changes between the two -func TestChangesDirsEmpty(t *testing.T) { - // TODO Windows. There may be a way of running this, but turning off for now - // as createSampleDir uses symlinks. 
- if runtime.GOOS == "windows" { - t.Skip("symlinks on Windows") - } - src, err := ioutil.TempDir("", "docker-changes-test") - assert.NilError(t, err) - defer os.RemoveAll(src) - createSampleDir(t, src) - dst := src + "-copy" - err = copyDir(src, dst) - assert.NilError(t, err) - defer os.RemoveAll(dst) - changes, err := ChangesDirs(dst, src) - assert.NilError(t, err) - - if len(changes) != 0 { - t.Fatalf("Reported changes for identical dirs: %v", changes) - } - os.RemoveAll(src) - os.RemoveAll(dst) -} - -func mutateSampleDir(t *testing.T, root string) { - // Remove a regular file - err := os.RemoveAll(path.Join(root, "file1")) - assert.NilError(t, err) - - // Remove a directory - err = os.RemoveAll(path.Join(root, "dir1")) - assert.NilError(t, err) - - // Remove a symlink - err = os.RemoveAll(path.Join(root, "symlink1")) - assert.NilError(t, err) - - // Rewrite a file - err = ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777) - assert.NilError(t, err) - - // Replace a file - err = os.RemoveAll(path.Join(root, "file3")) - assert.NilError(t, err) - err = ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404) - assert.NilError(t, err) - - // Touch file - err = system.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)) - assert.NilError(t, err) - - // Replace file with dir - err = os.RemoveAll(path.Join(root, "file5")) - assert.NilError(t, err) - err = os.MkdirAll(path.Join(root, "file5"), 0666) - assert.NilError(t, err) - - // Create new file - err = ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777) - assert.NilError(t, err) - - // Create new dir - err = os.MkdirAll(path.Join(root, "dirnew"), 0766) - assert.NilError(t, err) - - // Create a new symlink - err = os.Symlink("targetnew", path.Join(root, "symlinknew")) - assert.NilError(t, err) - - // Change a symlink - err = os.RemoveAll(path.Join(root, "symlink2")) - assert.NilError(t, err) - - err = os.Symlink("target2change", path.Join(root, "symlink2")) - assert.NilError(t, err) - - // Replace dir with file - err = os.RemoveAll(path.Join(root, "dir2")) - assert.NilError(t, err) - err = ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777) - assert.NilError(t, err) - - // Touch dir - err = system.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)) - assert.NilError(t, err) -} - -func TestChangesDirsMutated(t *testing.T) { - // TODO Windows. There may be a way of running this, but turning off for now - // as createSampleDir uses symlinks. 
- if runtime.GOOS == "windows" { - t.Skip("symlinks on Windows") - } - src, err := ioutil.TempDir("", "docker-changes-test") - assert.NilError(t, err) - createSampleDir(t, src) - dst := src + "-copy" - err = copyDir(src, dst) - assert.NilError(t, err) - defer os.RemoveAll(src) - defer os.RemoveAll(dst) - - mutateSampleDir(t, dst) - - changes, err := ChangesDirs(dst, src) - assert.NilError(t, err) - - sort.Sort(changesByPath(changes)) - - expectedChanges := []Change{ - {"/dir1", ChangeDelete}, - {"/dir2", ChangeModify}, - {"/dirnew", ChangeAdd}, - {"/file1", ChangeDelete}, - {"/file2", ChangeModify}, - {"/file3", ChangeModify}, - {"/file4", ChangeModify}, - {"/file5", ChangeModify}, - {"/filenew", ChangeAdd}, - {"/symlink1", ChangeDelete}, - {"/symlink2", ChangeModify}, - {"/symlinknew", ChangeAdd}, - } - - for i := 0; i < max(len(changes), len(expectedChanges)); i++ { - if i >= len(expectedChanges) { - t.Fatalf("unexpected change %s\n", changes[i].String()) - } - if i >= len(changes) { - t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) - } - if changes[i].Path == expectedChanges[i].Path { - if changes[i] != expectedChanges[i] { - t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) - } - } else if changes[i].Path < expectedChanges[i].Path { - t.Fatalf("unexpected change %s\n", changes[i].String()) - } else { - t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) - } - } -} - -func TestApplyLayer(t *testing.T) { - // TODO Windows. There may be a way of running this, but turning off for now - // as createSampleDir uses symlinks. - if runtime.GOOS == "windows" { - t.Skip("symlinks on Windows") - } - src, err := ioutil.TempDir("", "docker-changes-test") - assert.NilError(t, err) - createSampleDir(t, src) - defer os.RemoveAll(src) - dst := src + "-copy" - err = copyDir(src, dst) - assert.NilError(t, err) - mutateSampleDir(t, dst) - defer os.RemoveAll(dst) - - changes, err := ChangesDirs(dst, src) - assert.NilError(t, err) - - layer, err := ExportChanges(dst, changes, nil, nil) - assert.NilError(t, err) - - layerCopy, err := NewTempArchive(layer, "") - assert.NilError(t, err) - - _, err = ApplyLayer(src, layerCopy) - assert.NilError(t, err) - - changes2, err := ChangesDirs(src, dst) - assert.NilError(t, err) - - if len(changes2) != 0 { - t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) - } -} - -func TestChangesSizeWithHardlinks(t *testing.T) { - // TODO Windows. There may be a way of running this, but turning off for now - // as createSampleDir uses symlinks. 
- if runtime.GOOS == "windows" { - t.Skip("hardlinks on Windows") - } - srcDir, err := ioutil.TempDir("", "docker-test-srcDir") - assert.NilError(t, err) - defer os.RemoveAll(srcDir) - - destDir, err := ioutil.TempDir("", "docker-test-destDir") - assert.NilError(t, err) - defer os.RemoveAll(destDir) - - creationSize, err := prepareUntarSourceDirectory(100, destDir, true) - assert.NilError(t, err) - - changes, err := ChangesDirs(destDir, srcDir) - assert.NilError(t, err) - - got := ChangesSize(destDir, changes) - if got != int64(creationSize) { - t.Errorf("Expected %d bytes of changes, got %d", creationSize, got) - } -} - -func TestChangesSizeWithNoChanges(t *testing.T) { - size := ChangesSize("/tmp", nil) - if size != 0 { - t.Fatalf("ChangesSizes with no changes should be 0, was %d", size) - } -} - -func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) { - changes := []Change{ - {Path: "deletedPath", Kind: ChangeDelete}, - } - size := ChangesSize("/tmp", changes) - if size != 0 { - t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) - } -} - -func TestChangesSize(t *testing.T) { - parentPath, err := ioutil.TempDir("", "docker-changes-test") - assert.NilError(t, err) - defer os.RemoveAll(parentPath) - addition := path.Join(parentPath, "addition") - err = ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744) - assert.NilError(t, err) - modification := path.Join(parentPath, "modification") - err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744) - assert.NilError(t, err) - - changes := []Change{ - {Path: "addition", Kind: ChangeAdd}, - {Path: "modification", Kind: ChangeModify}, - } - size := ChangesSize(parentPath, changes) - if size != 6 { - t.Fatalf("Expected 6 bytes of changes, got %d", size) - } -} - -func checkChanges(expectedChanges, changes []Change, t *testing.T) { - sort.Sort(changesByPath(expectedChanges)) - sort.Sort(changesByPath(changes)) - for i := 0; i < max(len(changes), len(expectedChanges)); i++ { - if i >= len(expectedChanges) { - t.Fatalf("unexpected change %s\n", changes[i].String()) - } - if i >= len(changes) { - t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) - } - if changes[i].Path == expectedChanges[i].Path { - if changes[i] != expectedChanges[i] { - t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) - } - } else if changes[i].Path < expectedChanges[i].Path { - t.Fatalf("unexpected change %s\n", changes[i].String()) - } else { - t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go deleted file mode 100644 index c06a209d..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build !windows - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "os" - "syscall" - - "github.com/docker/docker/pkg/system" - "golang.org/x/sys/unix" -) - -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { - // Don't look at size for dirs, its not a good measure of change - if oldStat.Mode() != newStat.Mode() || - oldStat.UID() != newStat.UID() || - oldStat.GID() != newStat.GID() || - oldStat.Rdev() != newStat.Rdev() || - // Don't look at size for dirs, its not a good measure of change - (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && - 
(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 -} - -func getIno(fi os.FileInfo) uint64 { - return fi.Sys().(*syscall.Stat_t).Ino -} - -func hasHardlinks(fi os.FileInfo) bool { - return fi.Sys().(*syscall.Stat_t).Nlink > 1 -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go deleted file mode 100644 index 6555c013..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "os" - - "github.com/docker/docker/pkg/system" -) - -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { - - // Don't look at size for dirs, its not a good measure of change - if oldStat.Mtim() != newStat.Mtim() || - oldStat.Mode() != newStat.Mode() || - oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode().IsDir() -} - -func getIno(fi os.FileInfo) (inode uint64) { - return -} - -func hasHardlinks(fi os.FileInfo) bool { - return false -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go deleted file mode 100644 index d0f13ca7..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ /dev/null @@ -1,472 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "errors" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" -) - -// Errors used or returned by this file. -var ( - ErrNotDirectory = errors.New("not a directory") - ErrDirNotExists = errors.New("no such directory") - ErrCannotCopyDir = errors.New("cannot copy directory") - ErrInvalidCopySource = errors.New("invalid copy source content") -) - -// PreserveTrailingDotOrSeparator returns the given cleaned path (after -// processing using any utility functions from the path or filepath stdlib -// packages) and appends a trailing `/.` or `/` if its corresponding original -// path (from before being processed by utility functions from the path or -// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned -// path already ends in a `.` path segment, then another is not added. If the -// clean path already ends in the separator, then another is not added. -func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string { - // Ensure paths are in platform semantics - cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1) - originalPath = strings.Replace(originalPath, "/", string(sep), -1) - - if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { - if !hasTrailingPathSeparator(cleanedPath, sep) { - // Add a separator if it doesn't already end with one (a cleaned - // path would only end in a separator if it is the root). - cleanedPath += string(sep) - } - cleanedPath += "." 
- } - - if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) { - cleanedPath += string(sep) - } - - return cleanedPath -} - -// assertsDirectory returns whether the given path is -// asserted to be a directory, i.e., the path ends with -// a trailing '/' or `/.`, assuming a path separator of `/`. -func assertsDirectory(path string, sep byte) bool { - return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path) -} - -// hasTrailingPathSeparator returns whether the given -// path ends with the system's path separator character. -func hasTrailingPathSeparator(path string, sep byte) bool { - return len(path) > 0 && path[len(path)-1] == sep -} - -// specifiesCurrentDir returns whether the given path specifies -// a "current directory", i.e., the last path segment is `.`. -func specifiesCurrentDir(path string) bool { - return filepath.Base(path) == "." -} - -// SplitPathDirEntry splits the given path between its directory name and its -// basename by first cleaning the path but preserves a trailing "." if the -// original path specified the current directory. -func SplitPathDirEntry(path string) (dir, base string) { - cleanedPath := filepath.Clean(filepath.FromSlash(path)) - - if specifiesCurrentDir(path) { - cleanedPath += string(os.PathSeparator) + "." - } - - return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) -} - -// TarResource archives the resource described by the given CopyInfo to a Tar -// archive. A non-nil error is returned if sourcePath does not exist or is -// asserted to be a directory but exists as another type of file. -// -// This function acts as a convenient wrapper around TarWithOptions, which -// requires a directory as the source path. TarResource accepts either a -// directory or a file path and correctly sets the Tar options. -func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { - return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) -} - -// TarResourceRebase is like TarResource but renames the first path element of -// items in the resulting tar archive to match the given rebaseName if not "". -func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { - sourcePath = normalizePath(sourcePath) - if _, err = os.Lstat(sourcePath); err != nil { - // Catches the case where the source does not exist or is not a - // directory if asserted to be a directory, as this also causes an - // error. - return - } - - // Separate the source path between its directory and - // the entry in that directory which we are archiving. - sourceDir, sourceBase := SplitPathDirEntry(sourcePath) - opts := TarResourceRebaseOpts(sourceBase, rebaseName) - - logrus.Debugf("copying %q from %q", sourceBase, sourceDir) - return TarWithOptions(sourceDir, opts) -} - -// TarResourceRebaseOpts does not preform the Tar, but instead just creates the rebase -// parameters to be sent to TarWithOptions (the TarOptions struct) -func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions { - filter := []string{sourceBase} - return &TarOptions{ - Compression: Uncompressed, - IncludeFiles: filter, - IncludeSourceDir: true, - RebaseNames: map[string]string{ - sourceBase: rebaseName, - }, - } -} - -// CopyInfo holds basic info about the source -// or destination path of a copy operation. 
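// A small, hypothetical sketch of the path helpers above: filepath.Clean drops a
// trailing "/." or "/", and PreserveTrailingDotOrSeparator puts it back so later
// code can still tell "copy the directory" from "copy its contents";
// SplitPathDirEntry then keeps that trailing "." as the base entry.
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	orig := "/var/data/."           // hypothetical source spec: contents of /var/data
	cleaned := filepath.Clean(orig) // "/var/data" – the "/." is gone

	restored := archive.PreserveTrailingDotOrSeparator(cleaned, orig, os.PathSeparator)
	dir, base := archive.SplitPathDirEntry(restored)

	fmt.Println(restored)  // "/var/data/."
	fmt.Println(dir, base) // "/var/data" "."
}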
-type CopyInfo struct { - Path string - Exists bool - IsDir bool - RebaseName string -} - -// CopyInfoSourcePath stats the given path to create a CopyInfo -// struct representing that resource for the source of an archive copy -// operation. The given path should be an absolute local path. A source path -// has all symlinks evaluated that appear before the last path separator ("/" -// on Unix). As it is to be a copy source, the path must exist. -func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { - // normalize the file path and then evaluate the symbol link - // we will use the target file instead of the symbol link if - // followLink is set - path = normalizePath(path) - - resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) - if err != nil { - return CopyInfo{}, err - } - - stat, err := os.Lstat(resolvedPath) - if err != nil { - return CopyInfo{}, err - } - - return CopyInfo{ - Path: resolvedPath, - Exists: true, - IsDir: stat.IsDir(), - RebaseName: rebaseName, - }, nil -} - -// CopyInfoDestinationPath stats the given path to create a CopyInfo -// struct representing that resource for the destination of an archive copy -// operation. The given path should be an absolute local path. -func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { - maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. - path = normalizePath(path) - originalPath := path - - stat, err := os.Lstat(path) - - if err == nil && stat.Mode()&os.ModeSymlink == 0 { - // The path exists and is not a symlink. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil - } - - // While the path is a symlink. - for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { - if n > maxSymlinkIter { - // Don't follow symlinks more than this arbitrary number of times. - return CopyInfo{}, errors.New("too many symlinks in " + originalPath) - } - - // The path is a symbolic link. We need to evaluate it so that the - // destination of the copy operation is the link target and not the - // link itself. This is notably different than CopyInfoSourcePath which - // only evaluates symlinks before the last appearing path separator. - // Also note that it is okay if the last path element is a broken - // symlink as the copy operation should create the target. - var linkTarget string - - linkTarget, err = os.Readlink(path) - if err != nil { - return CopyInfo{}, err - } - - if !system.IsAbs(linkTarget) { - // Join with the parent directory. - dstParent, _ := SplitPathDirEntry(path) - linkTarget = filepath.Join(dstParent, linkTarget) - } - - path = linkTarget - stat, err = os.Lstat(path) - } - - if err != nil { - // It's okay if the destination path doesn't exist. We can still - // continue the copy operation if the parent directory exists. - if !os.IsNotExist(err) { - return CopyInfo{}, err - } - - // Ensure destination parent dir exists. - dstParent, _ := SplitPathDirEntry(path) - - parentDirStat, err := os.Stat(dstParent) - if err != nil { - return CopyInfo{}, err - } - if !parentDirStat.IsDir() { - return CopyInfo{}, ErrNotDirectory - } - - return CopyInfo{Path: path}, nil - } - - // The path exists after resolving symlinks. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil -} - -// PrepareArchiveCopy prepares the given srcContent archive, which should -// contain the archived resource described by srcInfo, to the destination -// described by dstInfo. 
Returns the possibly modified content archive along -// with the path to the destination directory which it should be extracted to. -func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { - // Ensure in platform semantics - srcInfo.Path = normalizePath(srcInfo.Path) - dstInfo.Path = normalizePath(dstInfo.Path) - - // Separate the destination path between its directory and base - // components in case the source archive contents need to be rebased. - dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) - _, srcBase := SplitPathDirEntry(srcInfo.Path) - - switch { - case dstInfo.Exists && dstInfo.IsDir: - // The destination exists as a directory. No alteration - // to srcContent is needed as its contents can be - // simply extracted to the destination directory. - return dstInfo.Path, ioutil.NopCloser(srcContent), nil - case dstInfo.Exists && srcInfo.IsDir: - // The destination exists as some type of file and the source - // content is a directory. This is an error condition since - // you cannot copy a directory to an existing file location. - return "", nil, ErrCannotCopyDir - case dstInfo.Exists: - // The destination exists as some type of file and the source content - // is also a file. The source content entry will have to be renamed to - // have a basename which matches the destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case srcInfo.IsDir: - // The destination does not exist and the source content is an archive - // of a directory. The archive should be extracted to the parent of - // the destination path instead, and when it is, the directory that is - // created as a result should take the name of the destination path. - // The source content entries will have to be renamed to have a - // basename which matches the destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case assertsDirectory(dstInfo.Path, os.PathSeparator): - // The destination does not exist and is asserted to be created as a - // directory, but the source content is not a directory. This is an - // error condition since you cannot create a directory from a file - // source. - return "", nil, ErrDirNotExists - default: - // The last remaining case is when the destination does not exist, is - // not asserted to be a directory, and the source content is not an - // archive of a directory. It this case, the destination file will need - // to be created when the archive is extracted and the source content - // entry will have to be renamed to have a basename which matches the - // destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - } - -} - -// RebaseArchiveEntries rewrites the given srcContent archive replacing -// an occurrence of oldBase with newBase at the beginning of entry names. -func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { - if oldBase == string(os.PathSeparator) { - // If oldBase specifies the root directory, use an empty string as - // oldBase instead so that newBase doesn't replace the path separator - // that all paths will start with. 
- oldBase = "" - } - - rebased, w := io.Pipe() - - go func() { - srcTar := tar.NewReader(srcContent) - rebasedTar := tar.NewWriter(w) - - for { - hdr, err := srcTar.Next() - if err == io.EOF { - // Signals end of archive. - rebasedTar.Close() - w.Close() - return - } - if err != nil { - w.CloseWithError(err) - return - } - - hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) - if hdr.Typeflag == tar.TypeLink { - hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) - } - - if err = rebasedTar.WriteHeader(hdr); err != nil { - w.CloseWithError(err) - return - } - - if _, err = io.Copy(rebasedTar, srcTar); err != nil { - w.CloseWithError(err) - return - } - } - }() - - return rebased -} - -// TODO @gupta-ak. These might have to be changed in the future to be -// continuity driver aware as well to support LCOW. - -// CopyResource performs an archive copy from the given source path to the -// given destination path. The source path MUST exist and the destination -// path's parent directory must exist. -func CopyResource(srcPath, dstPath string, followLink bool) error { - var ( - srcInfo CopyInfo - err error - ) - - // Ensure in platform semantics - srcPath = normalizePath(srcPath) - dstPath = normalizePath(dstPath) - - // Clean the source and destination paths. - srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator) - dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator) - - if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { - return err - } - - content, err := TarResource(srcInfo) - if err != nil { - return err - } - defer content.Close() - - return CopyTo(content, srcInfo, dstPath) -} - -// CopyTo handles extracting the given content whose -// entries should be sourced from srcInfo to dstPath. -func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { - // The destination path need not exist, but CopyInfoDestinationPath will - // ensure that at least the parent directory exists. - dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) - if err != nil { - return err - } - - dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) - if err != nil { - return err - } - defer copyArchive.Close() - - options := &TarOptions{ - NoLchown: true, - NoOverwriteDirNonDir: true, - } - - return Untar(copyArchive, dstDir, options) -} - -// ResolveHostSourcePath decides real path need to be copied with parameters such as -// whether to follow symbol link or not, if followLink is true, resolvedPath will return -// link target of any symbol link file, else it will only resolve symlink of directory -// but return symbol link file itself without resolving. -func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { - if followLink { - resolvedPath, err = filepath.EvalSymlinks(path) - if err != nil { - return - } - - resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) - } else { - dirPath, basePath := filepath.Split(path) - - // if not follow symbol link, then resolve symbol link of parent dir - var resolvedDirPath string - resolvedDirPath, err = filepath.EvalSymlinks(dirPath) - if err != nil { - return - } - // resolvedDirPath will have been cleaned (no trailing path separators) so - // we can manually join it with the base path element. 
- resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath - if hasTrailingPathSeparator(path, os.PathSeparator) && - filepath.Base(path) != filepath.Base(resolvedPath) { - rebaseName = filepath.Base(path) - } - } - return resolvedPath, rebaseName, nil -} - -// GetRebaseName normalizes and compares path and resolvedPath, -// return completed resolved path and rebased file name -func GetRebaseName(path, resolvedPath string) (string, string) { - // linkTarget will have been cleaned (no trailing path separators and dot) so - // we can manually join it with them - var rebaseName string - if specifiesCurrentDir(path) && - !specifiesCurrentDir(resolvedPath) { - resolvedPath += string(filepath.Separator) + "." - } - - if hasTrailingPathSeparator(path, os.PathSeparator) && - !hasTrailingPathSeparator(resolvedPath, os.PathSeparator) { - resolvedPath += string(filepath.Separator) - } - - if filepath.Base(path) != filepath.Base(resolvedPath) { - // In the case where the path had a trailing separator and a symlink - // evaluation has changed the last path component, we will need to - // rebase the name in the archive that is being copied to match the - // originally requested name. - rebaseName = filepath.Base(path) - } - return resolvedPath, rebaseName -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go deleted file mode 100644 index 3958364f..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.ToSlash(path) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix_test.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix_test.go deleted file mode 100644 index 08b1702c..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy_unix_test.go +++ /dev/null @@ -1,958 +0,0 @@ -// +build !windows - -// TODO Windows: Some of these tests may be salvageable and portable to Windows. - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "bytes" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/gotestyourself/gotestyourself/assert" -) - -func removeAllPaths(paths ...string) { - for _, path := range paths { - os.RemoveAll(path) - } -} - -func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) { - var err error - - tmpDirA, err = ioutil.TempDir("", "archive-copy-test") - assert.NilError(t, err) - - tmpDirB, err = ioutil.TempDir("", "archive-copy-test") - assert.NilError(t, err) - - return -} - -func isNotDir(err error) bool { - return strings.Contains(err.Error(), "not a directory") -} - -func joinTrailingSep(pathElements ...string) string { - joined := filepath.Join(pathElements...) 
- - return fmt.Sprintf("%s%c", joined, filepath.Separator) -} - -func fileContentsEqual(t *testing.T, filenameA, filenameB string) (err error) { - t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB) - - fileA, err := os.Open(filenameA) - if err != nil { - return - } - defer fileA.Close() - - fileB, err := os.Open(filenameB) - if err != nil { - return - } - defer fileB.Close() - - hasher := sha256.New() - - if _, err = io.Copy(hasher, fileA); err != nil { - return - } - - hashA := hasher.Sum(nil) - hasher.Reset() - - if _, err = io.Copy(hasher, fileB); err != nil { - return - } - - hashB := hasher.Sum(nil) - - if !bytes.Equal(hashA, hashB) { - err = fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB)) - } - - return -} - -func dirContentsEqual(t *testing.T, newDir, oldDir string) (err error) { - t.Logf("checking for equal directory contents: %q and %q\n", newDir, oldDir) - - var changes []Change - - if changes, err = ChangesDirs(newDir, oldDir); err != nil { - return - } - - if len(changes) != 0 { - err = fmt.Errorf("expected no changes between directories, but got: %v", changes) - } - - return -} - -func logDirContents(t *testing.T, dirPath string) { - logWalkedPaths := filepath.WalkFunc(func(path string, info os.FileInfo, err error) error { - if err != nil { - t.Errorf("stat error for path %q: %s", path, err) - return nil - } - - if info.IsDir() { - path = joinTrailingSep(path) - } - - t.Logf("\t%s", path) - - return nil - }) - - t.Logf("logging directory contents: %q", dirPath) - - err := filepath.Walk(dirPath, logWalkedPaths) - assert.NilError(t, err) -} - -func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) { - t.Logf("copying from %q to %q (not follow symbol link)", srcPath, dstPath) - - return CopyResource(srcPath, dstPath, false) -} - -func testCopyHelperFSym(t *testing.T, srcPath, dstPath string) (err error) { - t.Logf("copying from %q to %q (follow symbol link)", srcPath, dstPath) - - return CopyResource(srcPath, dstPath, true) -} - -// Basic assumptions about SRC and DST: -// 1. SRC must exist. -// 2. If SRC ends with a trailing separator, it must be a directory. -// 3. DST parent directory must exist. -// 4. If DST exists as a file, it must not end with a trailing separator. - -// First get these easy error cases out of the way. - -// Test for error when SRC does not exist. -func TestCopyErrSrcNotExists(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - if _, err := CopyInfoSourcePath(filepath.Join(tmpDirA, "file1"), false); !os.IsNotExist(err) { - t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) - } -} - -// Test for error when SRC ends in a trailing -// path separator but it exists as a file. -func TestCopyErrSrcNotDir(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - if _, err := CopyInfoSourcePath(joinTrailingSep(tmpDirA, "file1"), false); !isNotDir(err) { - t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) - } -} - -// Test for error when SRC is a valid file or directory, -// but the DST parent directory does not exist. -func TestCopyErrDstParentNotExists(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. 
- createSampleDir(t, tmpDirA) - - srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} - - // Try with a file source. - content, err := TarResource(srcInfo) - if err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - defer content.Close() - - // Copy to a file whose parent does not exist. - if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil { - t.Fatal("expected IsNotExist error, but got nil instead") - } - - if !os.IsNotExist(err) { - t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) - } - - // Try with a directory source. - srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} - - content, err = TarResource(srcInfo) - if err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - defer content.Close() - - // Copy to a directory whose parent does not exist. - if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil { - t.Fatal("expected IsNotExist error, but got nil instead") - } - - if !os.IsNotExist(err) { - t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) - } -} - -// Test for error when DST ends in a trailing -// path separator but exists as a file. -func TestCopyErrDstNotDir(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - // Try with a file source. - srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} - - content, err := TarResource(srcInfo) - if err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - defer content.Close() - - if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { - t.Fatal("expected IsNotDir error, but got nil instead") - } - - if !isNotDir(err) { - t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) - } - - // Try with a directory source. - srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} - - content, err = TarResource(srcInfo) - if err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - defer content.Close() - - if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { - t.Fatal("expected IsNotDir error, but got nil instead") - } - - if !isNotDir(err) { - t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) - } -} - -// Possibilities are reduced to the remaining 10 cases: -// -// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action -// =================================================================================================== -// A | no | - | no | - | no | create file -// B | no | - | no | - | yes | error -// C | no | - | yes | no | - | overwrite file -// D | no | - | yes | yes | - | create file in dst dir -// E | yes | no | no | - | - | create dir, copy contents -// F | yes | no | yes | no | - | error -// G | yes | no | yes | yes | - | copy dir and contents -// H | yes | yes | no | - | - | create dir, copy contents -// I | yes | yes | yes | no | - | error -// J | yes | yes | yes | yes | - | copy dir contents -// - -// A. SRC specifies a file and DST (no trailing path separator) doesn't -// exist. This should create a file with the name DST and copy the -// contents of the source file into it. 
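// A hypothetical sketch of cases A and B from the table above, driven through
// CopyResource as vendored in the deleted copy.go: a file source copied to a
// non-existent destination creates the file (A), while the same copy with a
// trailing separator fails with ErrDirNotExists (B).
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	srcDir, _ := ioutil.TempDir("", "copy-src")
	dstDir, _ := ioutil.TempDir("", "copy-dst")
	defer os.RemoveAll(srcDir)
	defer os.RemoveAll(dstDir)

	src := filepath.Join(srcDir, "file1")
	if err := ioutil.WriteFile(src, []byte("hello"), 0644); err != nil {
		log.Fatal(err)
	}

	// Case A: non-existent DST without a trailing separator -> file is created.
	if err := archive.CopyResource(src, filepath.Join(dstDir, "copied.txt"), false); err != nil {
		log.Fatal(err)
	}

	// Case B: non-existent DST with a trailing separator -> a single file
	// cannot become a directory, so ErrDirNotExists is returned.
	err := archive.CopyResource(src, filepath.Join(dstDir, "newdir")+string(os.PathSeparator), false)
	fmt.Println(err == archive.ErrDirNotExists) // true
}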
-func TestCopyCaseA(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcPath := filepath.Join(tmpDirA, "file1") - dstPath := filepath.Join(tmpDirB, "itWorks.txt") - - var err error - - if err = testCopyHelper(t, srcPath, dstPath); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - err = fileContentsEqual(t, srcPath, dstPath) - assert.NilError(t, err) - os.Remove(dstPath) - - symlinkPath := filepath.Join(tmpDirA, "symlink3") - symlinkPath1 := filepath.Join(tmpDirA, "symlink4") - linkTarget := filepath.Join(tmpDirA, "file1") - - if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - err = fileContentsEqual(t, linkTarget, dstPath) - assert.NilError(t, err) - os.Remove(dstPath) - if err = testCopyHelperFSym(t, symlinkPath1, dstPath); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - err = fileContentsEqual(t, linkTarget, dstPath) - assert.NilError(t, err) -} - -// B. SRC specifies a file and DST (with trailing path separator) doesn't -// exist. This should cause an error because the copy operation cannot -// create a directory when copying a single file. -func TestCopyCaseB(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcPath := filepath.Join(tmpDirA, "file1") - dstDir := joinTrailingSep(tmpDirB, "testDir") - - var err error - - if err = testCopyHelper(t, srcPath, dstDir); err == nil { - t.Fatal("expected ErrDirNotExists error, but got nil instead") - } - - if err != ErrDirNotExists { - t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) - } - - symlinkPath := filepath.Join(tmpDirA, "symlink3") - - if err = testCopyHelperFSym(t, symlinkPath, dstDir); err == nil { - t.Fatal("expected ErrDirNotExists error, but got nil instead") - } - if err != ErrDirNotExists { - t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) - } - -} - -// C. SRC specifies a file and DST exists as a file. This should overwrite -// the file at DST with the contents of the source file. -func TestCopyCaseC(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcPath := filepath.Join(tmpDirA, "file1") - dstPath := filepath.Join(tmpDirB, "file2") - - var err error - - // Ensure they start out different. - if err = fileContentsEqual(t, srcPath, dstPath); err == nil { - t.Fatal("expected different file contents") - } - - if err = testCopyHelper(t, srcPath, dstPath); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - err = fileContentsEqual(t, srcPath, dstPath) - assert.NilError(t, err) -} - -// C. Symbol link following version: -// SRC specifies a file and DST exists as a file. This should overwrite -// the file at DST with the contents of the source file. -func TestCopyCaseCFSym(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. 
- createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - symlinkPathBad := filepath.Join(tmpDirA, "symlink1") - symlinkPath := filepath.Join(tmpDirA, "symlink3") - linkTarget := filepath.Join(tmpDirA, "file1") - dstPath := filepath.Join(tmpDirB, "file2") - - var err error - - // first to test broken link - if err = testCopyHelperFSym(t, symlinkPathBad, dstPath); err == nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - // test symbol link -> symbol link -> target - // Ensure they start out different. - if err = fileContentsEqual(t, linkTarget, dstPath); err == nil { - t.Fatal("expected different file contents") - } - - if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - err = fileContentsEqual(t, linkTarget, dstPath) - assert.NilError(t, err) -} - -// D. SRC specifies a file and DST exists as a directory. This should place -// a copy of the source file inside it using the basename from SRC. Ensure -// this works whether DST has a trailing path separator or not. -func TestCopyCaseD(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcPath := filepath.Join(tmpDirA, "file1") - dstDir := filepath.Join(tmpDirB, "dir1") - dstPath := filepath.Join(dstDir, "file1") - - var err error - - // Ensure that dstPath doesn't exist. - if _, err = os.Stat(dstPath); !os.IsNotExist(err) { - t.Fatalf("did not expect dstPath %q to exist", dstPath) - } - - if err = testCopyHelper(t, srcPath, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - err = fileContentsEqual(t, srcPath, dstPath) - assert.NilError(t, err) - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir1") - - if err = testCopyHelper(t, srcPath, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - err = fileContentsEqual(t, srcPath, dstPath) - assert.NilError(t, err) -} - -// D. Symbol link following version: -// SRC specifies a file and DST exists as a directory. This should place -// a copy of the source file inside it using the basename from SRC. Ensure -// this works whether DST has a trailing path separator or not. -func TestCopyCaseDFSym(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcPath := filepath.Join(tmpDirA, "symlink4") - linkTarget := filepath.Join(tmpDirA, "file1") - dstDir := filepath.Join(tmpDirB, "dir1") - dstPath := filepath.Join(dstDir, "symlink4") - - var err error - - // Ensure that dstPath doesn't exist. - if _, err = os.Stat(dstPath); !os.IsNotExist(err) { - t.Fatalf("did not expect dstPath %q to exist", dstPath) - } - - if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - err = fileContentsEqual(t, linkTarget, dstPath) - assert.NilError(t, err) - - // Now try again but using a trailing path separator for dstDir. 
- - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir1") - - if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - err = fileContentsEqual(t, linkTarget, dstPath) - assert.NilError(t, err) -} - -// E. SRC specifies a directory and DST does not exist. This should create a -// directory at DST and copy the contents of the SRC directory into the DST -// directory. Ensure this works whether DST has a trailing path separator or -// not. -func TestCopyCaseE(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcDir := filepath.Join(tmpDirA, "dir1") - dstDir := filepath.Join(tmpDirB, "testDir") - - var err error - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "testDir") - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - err = dirContentsEqual(t, dstDir, srcDir) - assert.NilError(t, err) -} - -// E. Symbol link following version: -// SRC specifies a directory and DST does not exist. This should create a -// directory at DST and copy the contents of the SRC directory into the DST -// directory. Ensure this works whether DST has a trailing path separator or -// not. -func TestCopyCaseEFSym(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcDir := filepath.Join(tmpDirA, "dirSymlink") - linkTarget := filepath.Join(tmpDirA, "dir1") - dstDir := filepath.Join(tmpDirB, "testDir") - - var err error - - if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "testDir") - - if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - err = dirContentsEqual(t, dstDir, linkTarget) - assert.NilError(t, err) -} - -// F. SRC specifies a directory and DST exists as a file. This should cause an -// error as it is not possible to overwrite a file with a directory. -func TestCopyCaseF(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. 
- createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := filepath.Join(tmpDirA, "dir1") - symSrcDir := filepath.Join(tmpDirA, "dirSymlink") - dstFile := filepath.Join(tmpDirB, "file1") - - var err error - - if err = testCopyHelper(t, srcDir, dstFile); err == nil { - t.Fatal("expected ErrCannotCopyDir error, but got nil instead") - } - - if err != ErrCannotCopyDir { - t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) - } - - // now test with symbol link - if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil { - t.Fatal("expected ErrCannotCopyDir error, but got nil instead") - } - - if err != ErrCannotCopyDir { - t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) - } -} - -// G. SRC specifies a directory and DST exists as a directory. This should copy -// the SRC directory and all its contents to the DST directory. Ensure this -// works whether DST has a trailing path separator or not. -func TestCopyCaseG(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := filepath.Join(tmpDirA, "dir1") - dstDir := filepath.Join(tmpDirB, "dir2") - resultDir := filepath.Join(dstDir, "dir1") - - var err error - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - err = dirContentsEqual(t, resultDir, srcDir) - assert.NilError(t, err) - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir2") - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - err = dirContentsEqual(t, resultDir, srcDir) - assert.NilError(t, err) -} - -// G. Symbol link version: -// SRC specifies a directory and DST exists as a directory. This should copy -// the SRC directory and all its contents to the DST directory. Ensure this -// works whether DST has a trailing path separator or not. -func TestCopyCaseGFSym(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := filepath.Join(tmpDirA, "dirSymlink") - linkTarget := filepath.Join(tmpDirA, "dir1") - dstDir := filepath.Join(tmpDirB, "dir2") - resultDir := filepath.Join(dstDir, "dirSymlink") - - var err error - - if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - err = dirContentsEqual(t, resultDir, linkTarget) - assert.NilError(t, err) - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir2") - - if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - err = dirContentsEqual(t, resultDir, linkTarget) - assert.NilError(t, err) -} - -// H. 
SRC specifies a directory's contents only and DST does not exist. This -// should create a directory at DST and copy the contents of the SRC -// directory (but not the directory itself) into the DST directory. Ensure -// this works whether DST has a trailing path separator or not. -func TestCopyCaseH(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcDir := joinTrailingSep(tmpDirA, "dir1") + "." - dstDir := filepath.Join(tmpDirB, "testDir") - - var err error - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "testDir") - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } -} - -// H. Symbol link following version: -// SRC specifies a directory's contents only and DST does not exist. This -// should create a directory at DST and copy the contents of the SRC -// directory (but not the directory itself) into the DST directory. Ensure -// this works whether DST has a trailing path separator or not. -func TestCopyCaseHFSym(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "." - linkTarget := filepath.Join(tmpDirA, "dir1") - dstDir := filepath.Join(tmpDirB, "testDir") - - var err error - - if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "testDir") - - if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } -} - -// I. SRC specifies a directory's contents only and DST exists as a file. This -// should cause an error as it is not possible to overwrite a file with a -// directory. -func TestCopyCaseI(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := joinTrailingSep(tmpDirA, "dir1") + "." 
- symSrcDir := filepath.Join(tmpDirB, "dirSymlink") - dstFile := filepath.Join(tmpDirB, "file1") - - var err error - - if err = testCopyHelper(t, srcDir, dstFile); err == nil { - t.Fatal("expected ErrCannotCopyDir error, but got nil instead") - } - - if err != ErrCannotCopyDir { - t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) - } - - // now try with symbol link of dir - if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil { - t.Fatal("expected ErrCannotCopyDir error, but got nil instead") - } - - if err != ErrCannotCopyDir { - t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) - } -} - -// J. SRC specifies a directory's contents only and DST exists as a directory. -// This should copy the contents of the SRC directory (but not the directory -// itself) into the DST directory. Ensure this works whether DST has a -// trailing path separator or not. -func TestCopyCaseJ(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := joinTrailingSep(tmpDirA, "dir1") + "." - dstDir := filepath.Join(tmpDirB, "dir5") - - var err error - - // first to create an empty dir - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - err = dirContentsEqual(t, dstDir, srcDir) - assert.NilError(t, err) - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir5") - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - err = dirContentsEqual(t, dstDir, srcDir) - assert.NilError(t, err) -} - -// J. Symbol link following version: -// SRC specifies a directory's contents only and DST exists as a directory. -// This should copy the contents of the SRC directory (but not the directory -// itself) into the DST directory. Ensure this works whether DST has a -// trailing path separator or not. -func TestCopyCaseJFSym(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "." - linkTarget := filepath.Join(tmpDirA, "dir1") - dstDir := filepath.Join(tmpDirB, "dir5") - - var err error - - // first to create an empty dir - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - err = dirContentsEqual(t, dstDir, linkTarget) - assert.NilError(t, err) - - // Now try again but using a trailing path separator for dstDir. 
- - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir5") - - if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - err = dirContentsEqual(t, dstDir, linkTarget) - assert.NilError(t, err) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go deleted file mode 100644 index a878d1ba..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.FromSlash(path) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go deleted file mode 100644 index d0cff98f..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ /dev/null @@ -1,256 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" -) - -// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. -func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { - tr := tar.NewReader(layer) - trBuf := pools.BufioReader32KPool.Get(tr) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - unpackedPaths := make(map[string]struct{}) - - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) - - aufsTempdir := "" - aufsHardlinks := make(map[string]*tar.Header) - - // Iterate through the files in the archive. - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return 0, err - } - - size += hdr.Size - - // Normalize name, for safety and for a simple is-root check - hdr.Name = filepath.Clean(hdr.Name) - - // Windows does not support filenames with colons in them. Ignore - // these files. This is not a problem though (although it might - // appear that it is). Let's suppose a client is running docker pull. - // The daemon it points to is Windows. Would it make sense for the - // client to be doing a docker pull Ubuntu for example (which has files - // with colons in the name under /usr/share/man/man3)? No, absolutely - // not as it would really only make sense that they were pulling a - // Windows image. However, for development, it is necessary to be able - // to pull Linux images which are in the repository. - // - // TODO Windows. Once the registry is aware of what images are Windows- - // specific or Linux-specific, this warning should be changed to an error - // to cater for the situation where someone does manage to upload a Linux - // image but have it tagged as Windows inadvertently. 
- if runtime.GOOS == "windows" { - if strings.Contains(hdr.Name, ":") { - logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) - continue - } - } - - // Note as these operations are platform specific, so must the slash be. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists. - // This happened in some tests where an image had a tarfile without any - // parent directories. - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = system.MkdirAll(parentPath, 0600, "") - if err != nil { - return 0, err - } - } - } - - // Skip AUFS metadata dirs - if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { - // Regular files inside /.wh..wh.plnk can be used as hardlink targets - // We don't want this directory, but we need the files in them so that - // such hardlinks can be resolved. - if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { - basename := filepath.Base(hdr.Name) - aufsHardlinks[basename] = hdr - if aufsTempdir == "" { - if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { - return 0, err - } - defer os.RemoveAll(aufsTempdir) - } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil { - return 0, err - } - } - - if hdr.Name != WhiteoutOpaqueDir { - continue - } - } - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return 0, err - } - - // Note as these operations are platform specific, so must the slash be. - if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - base := filepath.Base(path) - - if strings.HasPrefix(base, WhiteoutPrefix) { - dir := filepath.Dir(path) - if base == WhiteoutOpaqueDir { - _, err := os.Lstat(dir) - if err != nil { - return 0, err - } - err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - if os.IsNotExist(err) { - err = nil // parent was deleted - } - return err - } - if path == dir { - return nil - } - if _, exists := unpackedPaths[path]; !exists { - err := os.RemoveAll(path) - return err - } - return nil - }) - if err != nil { - return 0, err - } - } else { - originalBase := base[len(WhiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - if err := os.RemoveAll(originalPath); err != nil { - return 0, err - } - } - } else { - // If path exits we almost always just want to remove and replace it. - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). 
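// The traversal guard used by UnpackLayer above, shown in isolation as a sketch
// (not the vendored code itself): joining the cleaned entry name under dest and
// checking the relative path rejects names such as "../escape" that would land
// outside the extraction root. The extra "rel != \"..\"" case is an assumption
// added here for completeness.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func insideDest(dest, name string) bool {
	path := filepath.Join(dest, filepath.Clean(name))
	rel, err := filepath.Rel(dest, path)
	if err != nil {
		return false
	}
	return rel != ".." && !strings.HasPrefix(rel, ".."+string(os.PathSeparator))
}

func main() {
	fmt.Println(insideDest("/unpack", "etc/passwd"))    // true
	fmt.Println(insideDest("/unpack", "../etc/passwd")) // false: breakout attempt
}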
- if fi, err := os.Lstat(path); err == nil { - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return 0, err - } - } - } - - trBuf.Reset(tr) - srcData := io.Reader(trBuf) - srcHdr := hdr - - // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so - // we manually retarget these into the temporary files we extracted them into - if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { - linkBasename := filepath.Base(hdr.Linkname) - srcHdr = aufsHardlinks[linkBasename] - if srcHdr == nil { - return 0, fmt.Errorf("Invalid aufs hardlink") - } - tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) - if err != nil { - return 0, err - } - defer tmpFile.Close() - srcData = tmpFile - } - - if err := remapIDs(idMappings, srcHdr); err != nil { - return 0, err - } - - if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil { - return 0, err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - unpackedPaths[path] = struct{}{} - } - } - - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return 0, err - } - } - - return size, nil -} - -// ApplyLayer parses a diff in the standard layer format from `layer`, -// and applies it to the directory `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer io.Reader) (int64, error) { - return applyLayerHandler(dest, layer, &TarOptions{}, true) -} - -// ApplyUncompressedLayer parses a diff in the standard layer format from -// `layer`, and applies it to the directory `dest`. The stream `layer` -// can only be uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { - return applyLayerHandler(dest, layer, options, false) -} - -// do the bulk load of ApplyLayer, but allow for not calling DecompressStream -func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { - dest = filepath.Clean(dest) - - // We need to be able to set any perms - oldmask, err := system.Umask(0) - if err != nil { - return 0, err - } - defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform - - if decompress { - layer, err = DecompressStream(layer) - if err != nil { - return 0, err - } - } - return UnpackLayer(dest, layer, options) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_test.go b/vendor/github.com/docker/docker/pkg/archive/diff_test.go deleted file mode 100644 index 19f2555e..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/diff_test.go +++ /dev/null @@ -1,386 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "io" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "runtime" - "testing" - - "github.com/docker/docker/pkg/ioutils" -) - -func TestApplyLayerInvalidFilenames(t *testing.T) { - // TODO Windows: Figure out how to fix this test. 
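// A hypothetical sketch of the whiteout behaviour UnpackLayer implements above:
// a layer entry named ".wh.<name>" deletes <name> from the destination instead
// of being written out. Assumes the vendored package as it existed before this
// change, with ".wh." as the whiteout prefix.
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	dest, _ := ioutil.TempDir("", "unpack-dest")
	defer os.RemoveAll(dest)

	// The lower layer already contains "foo".
	if err := ioutil.WriteFile(filepath.Join(dest, "foo"), []byte("old"), 0644); err != nil {
		log.Fatal(err)
	}

	// Build a one-entry layer whose only content is the whiteout ".wh.foo".
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tw.WriteHeader(&tar.Header{Name: ".wh.foo", Typeflag: tar.TypeReg, Mode: 0600}); err != nil {
		log.Fatal(err)
	}
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}

	if _, err := archive.UnpackLayer(dest, &buf, nil); err != nil {
		log.Fatal(err)
	}

	_, err := os.Stat(filepath.Join(dest, "foo"))
	fmt.Println(os.IsNotExist(err)) // true: the whiteout removed "foo"
}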
- if runtime.GOOS == "windows" { - t.Skip("Passes but hits breakoutError: platform and architecture is not supported") - } - for i, headers := range [][]*tar.Header{ - { - { - Name: "../victim/dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { - { - // Note the leading slash - Name: "/../victim/slash-dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestApplyLayerInvalidHardlink(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("TypeLink support on Windows") - } - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeLink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeLink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (hardlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try reading victim/hello (hardlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try removing victim directory (hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { - t.Fatalf("i=%d. 
%v", i, err) - } - } -} - -func TestApplyLayerInvalidSymlink(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("TypeSymLink support on Windows") - } - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeSymlink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeSymlink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try removing victim directory (symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestApplyLayerWhiteouts(t *testing.T) { - // TODO Windows: Figure out why this test fails - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - - wd, err := ioutil.TempDir("", "graphdriver-test-whiteouts") - if err != nil { - return - } - defer os.RemoveAll(wd) - - base := []string{ - ".baz", - "bar/", - "bar/bax", - "bar/bay/", - "baz", - "foo/", - "foo/.abc", - "foo/.bcd/", - "foo/.bcd/a", - "foo/cde/", - "foo/cde/def", - "foo/cde/efg", - "foo/fgh", - "foobar", - } - - type tcase struct { - change, expected []string - } - - tcases := []tcase{ - { - base, - base, - }, - { - []string{ - ".bay", - ".wh.baz", - "foo/", - "foo/.bce", - "foo/.wh..wh..opq", - "foo/cde/", - "foo/cde/efg", - }, - []string{ - ".bay", - ".baz", - "bar/", - "bar/bax", - "bar/bay/", - "foo/", - "foo/.bce", - "foo/cde/", - "foo/cde/efg", - "foobar", - }, - }, - { - []string{ - ".bay", - ".wh..baz", - ".wh.foobar", - "foo/", - "foo/.abc", - "foo/.wh.cde", - "bar/", - }, - []string{ - ".bay", - "bar/", - "bar/bax", - "bar/bay/", - "foo/", - "foo/.abc", - "foo/.bce", - }, - }, - { - []string{ - ".abc", - ".wh..wh..opq", - "foobar", - }, - []string{ - ".abc", - "foobar", - }, - }, - } - - for i, tc := range tcases { - l, err := makeTestLayer(tc.change) - if err != nil { - t.Fatal(err) - } - - _, err = UnpackLayer(wd, l, nil) - if err != nil { - t.Fatal(err) - } - err = l.Close() - if err != nil { - t.Fatal(err) - } - - paths, err := readDirContents(wd) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(tc.expected, paths) { - t.Fatalf("invalid files for layer %d: expected %q, got %q", i, tc.expected, paths) - } - } - -} - -func makeTestLayer(paths []string) (rc io.ReadCloser, err error) { - tmpDir, err := ioutil.TempDir("", "graphdriver-test-mklayer") - if err != nil { - return - } - defer func() { - if err != nil { - os.RemoveAll(tmpDir) - } - }() - for _, p 
:= range paths { - if p[len(p)-1] == filepath.Separator { - if err = os.MkdirAll(filepath.Join(tmpDir, p), 0700); err != nil { - return - } - } else { - if err = ioutil.WriteFile(filepath.Join(tmpDir, p), nil, 0600); err != nil { - return - } - } - } - archive, err := Tar(tmpDir, Uncompressed) - if err != nil { - return - } - return ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - os.RemoveAll(tmpDir) - return err - }), nil -} - -func readDirContents(root string) ([]string, error) { - var files []string - err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if path == root { - return nil - } - rel, err := filepath.Rel(root, path) - if err != nil { - return err - } - if info.IsDir() { - rel = rel + "/" - } - files = append(files, rel) - return nil - }) - if err != nil { - return nil, err - } - return files, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go deleted file mode 100644 index 495db809..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/example_changes.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build ignore - -// Simple tool to create an archive stream from an old and new directory -// -// By default it will stream the comparison of two temporary directories with junk files -package main - -import ( - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path" - - "github.com/docker/docker/pkg/archive" - "github.com/sirupsen/logrus" -) - -var ( - flDebug = flag.Bool("D", false, "debugging output") - flNewDir = flag.String("newdir", "", "") - flOldDir = flag.String("olddir", "", "") - log = logrus.New() -) - -func main() { - flag.Usage = func() { - fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") - fmt.Printf("%s [OPTIONS]\n", os.Args[0]) - flag.PrintDefaults() - } - flag.Parse() - log.Out = os.Stderr - if (len(os.Getenv("DEBUG")) > 0) || *flDebug { - logrus.SetLevel(logrus.DebugLevel) - } - var newDir, oldDir string - - if len(*flNewDir) == 0 { - var err error - newDir, err = ioutil.TempDir("", "docker-test-newDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(newDir) - if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { - log.Fatal(err) - } - } else { - newDir = *flNewDir - } - - if len(*flOldDir) == 0 { - oldDir, err := ioutil.TempDir("", "docker-test-oldDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(oldDir) - } else { - oldDir = *flOldDir - } - - changes, err := archive.ChangesDirs(newDir, oldDir) - if err != nil { - log.Fatal(err) - } - - a, err := archive.ExportChanges(newDir, changes) - if err != nil { - log.Fatal(err) - } - defer a.Close() - - i, err := io.Copy(os.Stdout, a) - if err != nil && err != io.EOF { - log.Fatal(err) - } - fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) -} - -func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { - fileData := []byte("fooo") - for n := 0; n < numberOfFiles; n++ { - fileName := fmt.Sprintf("file-%d", n) - if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { - return 0, err - } - if makeLinks { - if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { - return 0, err - } - } - } - totalSize := numberOfFiles * len(fileData) - return totalSize, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go deleted file mode 100644 index 58aefe3e..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/time_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - if time.IsZero() { - // Return UTIME_OMIT special value - ts.Sec = 0 - ts.Nsec = ((1 << 30) - 2) - return - } - return syscall.NsecToTimespec(time.UnixNano()) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go deleted file mode 100644 index f58bf227..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !linux - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - nsec := int64(0) - if !time.IsZero() { - nsec = time.UnixNano() - } - return syscall.NsecToTimespec(nsec) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/utils_test.go b/vendor/github.com/docker/docker/pkg/archive/utils_test.go deleted file mode 100644 index a20f58dd..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/utils_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "time" -) - -var testUntarFns = map[string]func(string, io.Reader) error{ - "untar": func(dest string, r io.Reader) error { - return Untar(r, dest, nil) - }, - "applylayer": func(dest string, r io.Reader) error { - _, 
err := ApplyLayer(dest, r) - return err - }, -} - -// testBreakout is a helper function that, within the provided `tmpdir` directory, -// creates a `victim` folder with a generated `hello` file in it. -// `untar` extracts to a directory named `dest`, the tar file created from `headers`. -// -// Here are the tested scenarios: -// - removed `victim` folder (write) -// - removed files from `victim` folder (write) -// - new files in `victim` folder (write) -// - modified files in `victim` folder (write) -// - file in `dest` with same content as `victim/hello` (read) -// -// When using testBreakout make sure you cover one of the scenarios listed above. -func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { - tmpdir, err := ioutil.TempDir("", tmpdir) - if err != nil { - return err - } - defer os.RemoveAll(tmpdir) - - dest := filepath.Join(tmpdir, "dest") - if err := os.Mkdir(dest, 0755); err != nil { - return err - } - - victim := filepath.Join(tmpdir, "victim") - if err := os.Mkdir(victim, 0755); err != nil { - return err - } - hello := filepath.Join(victim, "hello") - helloData, err := time.Now().MarshalText() - if err != nil { - return err - } - if err := ioutil.WriteFile(hello, helloData, 0644); err != nil { - return err - } - helloStat, err := os.Stat(hello) - if err != nil { - return err - } - - reader, writer := io.Pipe() - go func() { - t := tar.NewWriter(writer) - for _, hdr := range headers { - t.WriteHeader(hdr) - } - t.Close() - }() - - untar := testUntarFns[untarFn] - if untar == nil { - return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) - } - if err := untar(dest, reader); err != nil { - if _, ok := err.(breakoutError); !ok { - // If untar returns an error unrelated to an archive breakout, - // then consider this an unexpected error and abort. - return err - } - // Here, untar detected the breakout. - // Let's move on verifying that indeed there was no breakout. - fmt.Printf("breakoutError: %v\n", err) - } - - // Check victim folder - f, err := os.Open(victim) - if err != nil { - // codepath taken if victim folder was removed - return fmt.Errorf("archive breakout: error reading %q: %v", victim, err) - } - defer f.Close() - - // Check contents of victim folder - // - // We are only interested in getting 2 files from the victim folder, because if all is well - // we expect only one result, the `hello` file. If there is a second result, it cannot - // hold the same name `hello` and we assume that a new file got created in the victim folder. - // That is enough to detect an archive breakout. 
- names, err := f.Readdirnames(2) - if err != nil { - // codepath taken if victim is not a folder - return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err) - } - for _, name := range names { - if name != "hello" { - // codepath taken if new file was created in victim folder - return fmt.Errorf("archive breakout: new file %q", name) - } - } - - // Check victim/hello - f, err = os.Open(hello) - if err != nil { - // codepath taken if read permissions were removed - return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err) - } - defer f.Close() - b, err := ioutil.ReadAll(f) - if err != nil { - return err - } - fi, err := f.Stat() - if err != nil { - return err - } - if helloStat.IsDir() != fi.IsDir() || - // TODO: cannot check for fi.ModTime() change - helloStat.Mode() != fi.Mode() || - helloStat.Size() != fi.Size() || - !bytes.Equal(helloData, b) { - // codepath taken if hello has been modified - return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi) - } - - // Check that nothing in dest/ has the same content as victim/hello. - // Since victim/hello was generated with time.Now(), it is safe to assume - // that any file whose content matches exactly victim/hello, managed somehow - // to access victim/hello. - return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error { - if info.IsDir() { - if err != nil { - // skip directory if error - return filepath.SkipDir - } - // enter directory - return nil - } - if err != nil { - // skip file if error - return nil - } - b, err := ioutil.ReadFile(path) - if err != nil { - // Houston, we have a problem. Aborting (space)walk. - return err - } - if bytes.Equal(helloData, b) { - return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path) - } - return nil - }) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go deleted file mode 100644 index 85435694..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/wrap.go +++ /dev/null @@ -1,59 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "bytes" - "io" -) - -// Generate generates a new archive from the content provided -// as input. -// -// `files` is a sequence of path/content pairs. A new file is -// added to the archive for each pair. -// If the last pair is incomplete, the file is created with an -// empty content. For example: -// -// Generate("foo.txt", "hello world", "emptyfile") -// -// The above call will return an archive with 2 files: -// * ./foo.txt with content "hello world" -// * ./empty with empty content -// -// FIXME: stream content instead of buffering -// FIXME: specify permissions and other archive metadata -func Generate(input ...string) (io.Reader, error) { - files := parseStringPairs(input...) 
- buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - for _, file := range files { - name, content := file[0], file[1] - hdr := &tar.Header{ - Name: name, - Size: int64(len(content)), - } - if err := tw.WriteHeader(hdr); err != nil { - return nil, err - } - if _, err := tw.Write([]byte(content)); err != nil { - return nil, err - } - } - if err := tw.Close(); err != nil { - return nil, err - } - return buf, nil -} - -func parseStringPairs(input ...string) (output [][2]string) { - output = make([][2]string, 0, len(input)/2+1) - for i := 0; i < len(input); i += 2 { - var pair [2]string - pair[0] = input[i] - if i+1 < len(input) { - pair[1] = input[i+1] - } - output = append(output, pair) - } - return -} diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap_test.go b/vendor/github.com/docker/docker/pkg/archive/wrap_test.go deleted file mode 100644 index 97953677..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/wrap_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "bytes" - "io" - "testing" - - "github.com/gotestyourself/gotestyourself/assert" -) - -func TestGenerateEmptyFile(t *testing.T) { - archive, err := Generate("emptyFile") - assert.NilError(t, err) - if archive == nil { - t.Fatal("The generated archive should not be nil.") - } - - expectedFiles := [][]string{ - {"emptyFile", ""}, - } - - tr := tar.NewReader(archive) - actualFiles := make([][]string, 0, 10) - i := 0 - for { - hdr, err := tr.Next() - if err == io.EOF { - break - } - assert.NilError(t, err) - buf := new(bytes.Buffer) - buf.ReadFrom(tr) - content := buf.String() - actualFiles = append(actualFiles, []string{hdr.Name, content}) - i++ - } - if len(actualFiles) != len(expectedFiles) { - t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) - } - for i := 0; i < len(expectedFiles); i++ { - actual := actualFiles[i] - expected := expectedFiles[i] - if actual[0] != expected[0] { - t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) - } - if actual[1] != expected[1] { - t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) - } - } -} - -func TestGenerateWithContent(t *testing.T) { - archive, err := Generate("file", "content") - assert.NilError(t, err) - if archive == nil { - t.Fatal("The generated archive should not be nil.") - } - - expectedFiles := [][]string{ - {"file", "content"}, - } - - tr := tar.NewReader(archive) - actualFiles := make([][]string, 0, 10) - i := 0 - for { - hdr, err := tr.Next() - if err == io.EOF { - break - } - assert.NilError(t, err) - buf := new(bytes.Buffer) - buf.ReadFrom(tr) - content := buf.String() - actualFiles = append(actualFiles, []string{hdr.Name, content}) - i++ - } - if len(actualFiles) != len(expectedFiles) { - t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) - } - for i := 0; i < len(expectedFiles); i++ { - actual := actualFiles[i] - expected := expectedFiles[i] - if actual[0] != expected[0] { - t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) - } - if actual[1] != expected[1] { - t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/BUILD.bazel b/vendor/github.com/docker/docker/pkg/fileutils/BUILD.bazel deleted file mode 100644 index 930de8fd..00000000 --- a/vendor/github.com/docker/docker/pkg/fileutils/BUILD.bazel +++ /dev/null @@ -1,38 +0,0 @@ 
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "fileutils.go", - ] + select({ - "@io_bazel_rules_go//go/platform:darwin": [ - "fileutils_darwin.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "fileutils_unix.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "fileutils_unix.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "fileutils_windows.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/docker/docker/pkg/fileutils", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/sirupsen/logrus:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["fileutils_test.go"], - embed = [":go_default_library"], - importpath = "github.com/docker/docker/pkg/fileutils", - deps = [ - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert/cmp:go_default_library", - ], -) diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go index 88e0f283..28cad499 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -13,7 +13,7 @@ import ( "github.com/sirupsen/logrus" ) -// PatternMatcher allows checking paths agaist a list of patterns +// PatternMatcher allows checking paths against a list of patterns type PatternMatcher struct { patterns []*Pattern exclusions bool diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go deleted file mode 100644 index b167538d..00000000 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go +++ /dev/null @@ -1,591 +0,0 @@ -package fileutils // import "github.com/docker/docker/pkg/fileutils" - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "runtime" - "strings" - "testing" - - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" -) - -// CopyFile with invalid src -func TestCopyFileWithInvalidSrc(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest")) - if err == nil { - t.Fatal("Should have fail to copy an invalid src file") - } - if bytes != 0 { - t.Fatal("Should have written 0 bytes") - } - -} - -// CopyFile with invalid dest -func TestCopyFileWithInvalidDest(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - src := path.Join(tempFolder, "file") - err = ioutil.WriteFile(src, []byte("content"), 0740) - if err != nil { - t.Fatal(err) - } - bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path")) - if err == nil { - t.Fatal("Should have fail to copy an invalid src file") - } - if bytes != 0 { - t.Fatal("Should have written 0 bytes") - } - -} - -// CopyFile with same src and dest -func TestCopyFileWithSameSrcAndDest(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - file := path.Join(tempFolder, "file") - err = ioutil.WriteFile(file, []byte("content"), 0740) - if err != nil { - t.Fatal(err) - } - bytes, err := 
CopyFile(file, file) - if err != nil { - t.Fatal(err) - } - if bytes != 0 { - t.Fatal("Should have written 0 bytes as it is the same file.") - } -} - -// CopyFile with same src and dest but path is different and not clean -func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - testFolder := path.Join(tempFolder, "test") - err = os.MkdirAll(testFolder, 0740) - if err != nil { - t.Fatal(err) - } - file := path.Join(testFolder, "file") - sameFile := testFolder + "/../test/file" - err = ioutil.WriteFile(file, []byte("content"), 0740) - if err != nil { - t.Fatal(err) - } - bytes, err := CopyFile(file, sameFile) - if err != nil { - t.Fatal(err) - } - if bytes != 0 { - t.Fatal("Should have written 0 bytes as it is the same file.") - } -} - -func TestCopyFile(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - src := path.Join(tempFolder, "src") - dest := path.Join(tempFolder, "dest") - ioutil.WriteFile(src, []byte("content"), 0777) - ioutil.WriteFile(dest, []byte("destContent"), 0777) - bytes, err := CopyFile(src, dest) - if err != nil { - t.Fatal(err) - } - if bytes != 7 { - t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes) - } - actual, err := ioutil.ReadFile(dest) - if err != nil { - t.Fatal(err) - } - if string(actual) != "content" { - t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content") - } -} - -// Reading a symlink to a directory must return the directory -func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - var err error - if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { - t.Errorf("failed to create directory: %s", err) - } - - if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { - t.Errorf("failed to create symlink: %s", err) - } - - var path string - if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { - t.Fatalf("failed to read symlink to directory: %s", err) - } - - if path != "/tmp/testReadSymlinkToExistingDirectory" { - t.Fatalf("symlink returned unexpected directory: %s", path) - } - - if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { - t.Errorf("failed to remove temporary directory: %s", err) - } - - if err = os.Remove("/tmp/dirLinkTest"); err != nil { - t.Errorf("failed to remove symlink: %s", err) - } -} - -// Reading a non-existing symlink must fail -func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { - var path string - var err error - if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { - t.Fatalf("error expected for non-existing symlink") - } - - if path != "" { - t.Fatalf("expected empty path, but '%s' was returned", path) - } -} - -// Reading a symlink to a file must fail -func TestReadSymlinkedDirectoryToFile(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - var err error - var file *os.File - - if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { - t.Fatalf("failed to create file: %s", err) - } - - file.Close() - - if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { - 
t.Errorf("failed to create symlink: %s", err) - } - - var path string - if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { - t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") - } - - if path != "" { - t.Fatalf("path should've been empty: %s", path) - } - - if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { - t.Errorf("failed to remove file: %s", err) - } - - if err = os.Remove("/tmp/fileLinkTest"); err != nil { - t.Errorf("failed to remove symlink: %s", err) - } -} - -func TestWildcardMatches(t *testing.T) { - match, _ := Matches("fileutils.go", []string{"*"}) - if !match { - t.Errorf("failed to get a wildcard match, got %v", match) - } -} - -// A simple pattern match should return true. -func TestPatternMatches(t *testing.T) { - match, _ := Matches("fileutils.go", []string{"*.go"}) - if !match { - t.Errorf("failed to get a match, got %v", match) - } -} - -// An exclusion followed by an inclusion should return true. -func TestExclusionPatternMatchesPatternBefore(t *testing.T) { - match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) - if !match { - t.Errorf("failed to get true match on exclusion pattern, got %v", match) - } -} - -// A folder pattern followed by an exception should return false. -func TestPatternMatchesFolderExclusions(t *testing.T) { - match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"}) - if match { - t.Errorf("failed to get a false match on exclusion pattern, got %v", match) - } -} - -// A folder pattern followed by an exception should return false. -func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { - match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"}) - if match { - t.Errorf("failed to get a false match on exclusion pattern, got %v", match) - } -} - -// A folder pattern followed by an exception should return false. -func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { - match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"}) - if match { - t.Errorf("failed to get a false match on exclusion pattern, got %v", match) - } -} - -// A pattern followed by an exclusion should return false. -func TestExclusionPatternMatchesPatternAfter(t *testing.T) { - match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"}) - if match { - t.Errorf("failed to get false match on exclusion pattern, got %v", match) - } -} - -// A filename evaluating to . should return false. -func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { - match, _ := Matches(".", []string{"*.go"}) - if match { - t.Errorf("failed to get false match on ., got %v", match) - } -} - -// A single ! pattern should return an error. 
-func TestSingleExclamationError(t *testing.T) { - _, err := Matches("fileutils.go", []string{"!"}) - if err == nil { - t.Errorf("failed to get an error for a single exclamation point, got %v", err) - } -} - -// Matches with no patterns -func TestMatchesWithNoPatterns(t *testing.T) { - matches, err := Matches("/any/path/there", []string{}) - if err != nil { - t.Fatal(err) - } - if matches { - t.Fatalf("Should not have match anything") - } -} - -// Matches with malformed patterns -func TestMatchesWithMalformedPatterns(t *testing.T) { - matches, err := Matches("/any/path/there", []string{"["}) - if err == nil { - t.Fatal("Should have failed because of a malformed syntax in the pattern") - } - if matches { - t.Fatalf("Should not have match anything") - } -} - -type matchesTestCase struct { - pattern string - text string - pass bool -} - -func TestMatches(t *testing.T) { - tests := []matchesTestCase{ - {"**", "file", true}, - {"**", "file/", true}, - {"**/", "file", true}, // weird one - {"**/", "file/", true}, - {"**", "/", true}, - {"**/", "/", true}, - {"**", "dir/file", true}, - {"**/", "dir/file", true}, - {"**", "dir/file/", true}, - {"**/", "dir/file/", true}, - {"**/**", "dir/file", true}, - {"**/**", "dir/file/", true}, - {"dir/**", "dir/file", true}, - {"dir/**", "dir/file/", true}, - {"dir/**", "dir/dir2/file", true}, - {"dir/**", "dir/dir2/file/", true}, - {"**/dir2/*", "dir/dir2/file", true}, - {"**/dir2/*", "dir/dir2/file/", true}, - {"**/dir2/**", "dir/dir2/dir3/file", true}, - {"**/dir2/**", "dir/dir2/dir3/file/", true}, - {"**file", "file", true}, - {"**file", "dir/file", true}, - {"**/file", "dir/file", true}, - {"**file", "dir/dir/file", true}, - {"**/file", "dir/dir/file", true}, - {"**/file*", "dir/dir/file", true}, - {"**/file*", "dir/dir/file.txt", true}, - {"**/file*txt", "dir/dir/file.txt", true}, - {"**/file*.txt", "dir/dir/file.txt", true}, - {"**/file*.txt*", "dir/dir/file.txt", true}, - {"**/**/*.txt", "dir/dir/file.txt", true}, - {"**/**/*.txt2", "dir/dir/file.txt", false}, - {"**/*.txt", "file.txt", true}, - {"**/**/*.txt", "file.txt", true}, - {"a**/*.txt", "a/file.txt", true}, - {"a**/*.txt", "a/dir/file.txt", true}, - {"a**/*.txt", "a/dir/dir/file.txt", true}, - {"a/*.txt", "a/dir/file.txt", false}, - {"a/*.txt", "a/file.txt", true}, - {"a/*.txt**", "a/file.txt", true}, - {"a[b-d]e", "ae", false}, - {"a[b-d]e", "ace", true}, - {"a[b-d]e", "aae", false}, - {"a[^b-d]e", "aze", true}, - {".*", ".foo", true}, - {".*", "foo", false}, - {"abc.def", "abcdef", false}, - {"abc.def", "abc.def", true}, - {"abc.def", "abcZdef", false}, - {"abc?def", "abcZdef", true}, - {"abc?def", "abcdef", false}, - {"a\\\\", "a\\", true}, - {"**/foo/bar", "foo/bar", true}, - {"**/foo/bar", "dir/foo/bar", true}, - {"**/foo/bar", "dir/dir2/foo/bar", true}, - {"abc/**", "abc", false}, - {"abc/**", "abc/def", true}, - {"abc/**", "abc/def/ghi", true}, - {"**/.foo", ".foo", true}, - {"**/.foo", "bar.foo", false}, - } - - if runtime.GOOS != "windows" { - tests = append(tests, []matchesTestCase{ - {"a\\*b", "a*b", true}, - {"a\\", "a", false}, - {"a\\", "a\\", false}, - }...) 
- } - - for _, test := range tests { - desc := fmt.Sprintf("pattern=%q text=%q", test.pattern, test.text) - pm, err := NewPatternMatcher([]string{test.pattern}) - assert.NilError(t, err, desc) - res, _ := pm.Matches(test.text) - assert.Check(t, is.Equal(test.pass, res), desc) - } -} - -func TestCleanPatterns(t *testing.T) { - patterns := []string{"docs", "config"} - pm, err := NewPatternMatcher(patterns) - if err != nil { - t.Fatalf("invalid pattern %v", patterns) - } - cleaned := pm.Patterns() - if len(cleaned) != 2 { - t.Errorf("expected 2 element slice, got %v", len(cleaned)) - } -} - -func TestCleanPatternsStripEmptyPatterns(t *testing.T) { - patterns := []string{"docs", "config", ""} - pm, err := NewPatternMatcher(patterns) - if err != nil { - t.Fatalf("invalid pattern %v", patterns) - } - cleaned := pm.Patterns() - if len(cleaned) != 2 { - t.Errorf("expected 2 element slice, got %v", len(cleaned)) - } -} - -func TestCleanPatternsExceptionFlag(t *testing.T) { - patterns := []string{"docs", "!docs/README.md"} - pm, err := NewPatternMatcher(patterns) - if err != nil { - t.Fatalf("invalid pattern %v", patterns) - } - if !pm.Exclusions() { - t.Errorf("expected exceptions to be true, got %v", pm.Exclusions()) - } -} - -func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) { - patterns := []string{"docs", " !docs/README.md"} - pm, err := NewPatternMatcher(patterns) - if err != nil { - t.Fatalf("invalid pattern %v", patterns) - } - if !pm.Exclusions() { - t.Errorf("expected exceptions to be true, got %v", pm.Exclusions()) - } -} - -func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) { - patterns := []string{"docs", "!docs/README.md "} - pm, err := NewPatternMatcher(patterns) - if err != nil { - t.Fatalf("invalid pattern %v", patterns) - } - if !pm.Exclusions() { - t.Errorf("expected exceptions to be true, got %v", pm.Exclusions()) - } -} - -func TestCleanPatternsErrorSingleException(t *testing.T) { - patterns := []string{"!"} - _, err := NewPatternMatcher(patterns) - if err == nil { - t.Errorf("expected error on single exclamation point, got %v", err) - } -} - -func TestCreateIfNotExistsDir(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempFolder) - - folderToCreate := filepath.Join(tempFolder, "tocreate") - - if err := CreateIfNotExists(folderToCreate, true); err != nil { - t.Fatal(err) - } - fileinfo, err := os.Stat(folderToCreate) - if err != nil { - t.Fatalf("Should have create a folder, got %v", err) - } - - if !fileinfo.IsDir() { - t.Fatalf("Should have been a dir, seems it's not") - } -} - -func TestCreateIfNotExistsFile(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempFolder) - - fileToCreate := filepath.Join(tempFolder, "file/to/create") - - if err := CreateIfNotExists(fileToCreate, false); err != nil { - t.Fatal(err) - } - fileinfo, err := os.Stat(fileToCreate) - if err != nil { - t.Fatalf("Should have create a file, got %v", err) - } - - if fileinfo.IsDir() { - t.Fatalf("Should have been a file, seems it's not") - } -} - -// These matchTests are stolen from go's filepath Match tests. 
-type matchTest struct { - pattern, s string - match bool - err error -} - -var matchTests = []matchTest{ - {"abc", "abc", true, nil}, - {"*", "abc", true, nil}, - {"*c", "abc", true, nil}, - {"a*", "a", true, nil}, - {"a*", "abc", true, nil}, - {"a*", "ab/c", true, nil}, - {"a*/b", "abc/b", true, nil}, - {"a*/b", "a/c/b", false, nil}, - {"a*b*c*d*e*/f", "axbxcxdxe/f", true, nil}, - {"a*b*c*d*e*/f", "axbxcxdxexxx/f", true, nil}, - {"a*b*c*d*e*/f", "axbxcxdxe/xxx/f", false, nil}, - {"a*b*c*d*e*/f", "axbxcxdxexxx/fff", false, nil}, - {"a*b?c*x", "abxbbxdbxebxczzx", true, nil}, - {"a*b?c*x", "abxbbxdbxebxczzy", false, nil}, - {"ab[c]", "abc", true, nil}, - {"ab[b-d]", "abc", true, nil}, - {"ab[e-g]", "abc", false, nil}, - {"ab[^c]", "abc", false, nil}, - {"ab[^b-d]", "abc", false, nil}, - {"ab[^e-g]", "abc", true, nil}, - {"a\\*b", "a*b", true, nil}, - {"a\\*b", "ab", false, nil}, - {"a?b", "a☺b", true, nil}, - {"a[^a]b", "a☺b", true, nil}, - {"a???b", "a☺b", false, nil}, - {"a[^a][^a][^a]b", "a☺b", false, nil}, - {"[a-ζ]*", "α", true, nil}, - {"*[a-ζ]", "A", false, nil}, - {"a?b", "a/b", false, nil}, - {"a*b", "a/b", false, nil}, - {"[\\]a]", "]", true, nil}, - {"[\\-]", "-", true, nil}, - {"[x\\-]", "x", true, nil}, - {"[x\\-]", "-", true, nil}, - {"[x\\-]", "z", false, nil}, - {"[\\-x]", "x", true, nil}, - {"[\\-x]", "-", true, nil}, - {"[\\-x]", "a", false, nil}, - {"[]a]", "]", false, filepath.ErrBadPattern}, - {"[-]", "-", false, filepath.ErrBadPattern}, - {"[x-]", "x", false, filepath.ErrBadPattern}, - {"[x-]", "-", false, filepath.ErrBadPattern}, - {"[x-]", "z", false, filepath.ErrBadPattern}, - {"[-x]", "x", false, filepath.ErrBadPattern}, - {"[-x]", "-", false, filepath.ErrBadPattern}, - {"[-x]", "a", false, filepath.ErrBadPattern}, - {"\\", "a", false, filepath.ErrBadPattern}, - {"[a-b-c]", "a", false, filepath.ErrBadPattern}, - {"[", "a", false, filepath.ErrBadPattern}, - {"[^", "a", false, filepath.ErrBadPattern}, - {"[^bc", "a", false, filepath.ErrBadPattern}, - {"a[", "a", false, filepath.ErrBadPattern}, // was nil but IMO its wrong - {"a[", "ab", false, filepath.ErrBadPattern}, - {"*x", "xxx", true, nil}, -} - -func errp(e error) string { - if e == nil { - return "" - } - return e.Error() -} - -// TestMatch test's our version of filepath.Match, called regexpMatch. -func TestMatch(t *testing.T) { - for _, tt := range matchTests { - pattern := tt.pattern - s := tt.s - if runtime.GOOS == "windows" { - if strings.Contains(pattern, "\\") { - // no escape allowed on windows. 
- continue - } - pattern = filepath.Clean(pattern) - s = filepath.Clean(s) - } - ok, err := Matches(s, []string{pattern}) - if ok != tt.match || err != tt.err { - t.Fatalf("Match(%#q, %#q) = %v, %q want %v, %q", pattern, s, ok, errp(err), tt.match, errp(tt.err)) - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/BUILD.bazel b/vendor/github.com/docker/docker/pkg/homedir/BUILD.bazel deleted file mode 100644 index 8738c2d5..00000000 --- a/vendor/github.com/docker/docker/pkg/homedir/BUILD.bazel +++ /dev/null @@ -1,95 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = select({ - "@io_bazel_rules_go//go/platform:android": [ - "homedir_others.go", - "homedir_unix.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "homedir_others.go", - "homedir_unix.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "homedir_others.go", - "homedir_unix.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "homedir_others.go", - "homedir_unix.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "homedir_linux.go", - "homedir_unix.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "homedir_others.go", - "homedir_unix.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "homedir_others.go", - "homedir_unix.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "homedir_others.go", - "homedir_unix.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "homedir_others.go", - "homedir_unix.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "homedir_others.go", - "homedir_unix.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "homedir_others.go", - "homedir_windows.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/docker/docker/pkg/homedir", - visibility = ["//visibility:public"], - deps = select({ - "@io_bazel_rules_go//go/platform:android": [ - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/docker/docker/pkg/idtools:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "//conditions:default": [], - }), -) - -go_test( - name = "go_default_test", - srcs = ["homedir_test.go"], - embed = [":go_default_library"], - importpath = "github.com/docker/docker/pkg/homedir", -) diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_test.go 
b/vendor/github.com/docker/docker/pkg/homedir/homedir_test.go deleted file mode 100644 index 49c42224..00000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "path/filepath" - "testing" -) - -func TestGet(t *testing.T) { - home := Get() - if home == "" { - t.Fatal("returned home directory is empty") - } - - if !filepath.IsAbs(home) { - t.Fatalf("returned path is not absolute: %s", home) - } -} - -func TestGetShortcutString(t *testing.T) { - shortcut := GetShortcutString() - if shortcut == "" { - t.Fatal("returned shortcut string is empty") - } -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/BUILD.bazel b/vendor/github.com/docker/docker/pkg/idtools/BUILD.bazel deleted file mode 100644 index cd92d267..00000000 --- a/vendor/github.com/docker/docker/pkg/idtools/BUILD.bazel +++ /dev/null @@ -1,214 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "idtools.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "idtools_unix.go", - "usergroupadd_unsupported.go", - "utils_unix.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "idtools_unix.go", - "usergroupadd_unsupported.go", - "utils_unix.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "idtools_unix.go", - "usergroupadd_unsupported.go", - "utils_unix.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "idtools_unix.go", - "usergroupadd_unsupported.go", - "utils_unix.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "idtools_unix.go", - "usergroupadd_linux.go", - "utils_unix.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "idtools_unix.go", - "usergroupadd_unsupported.go", - "utils_unix.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "idtools_unix.go", - "usergroupadd_unsupported.go", - "utils_unix.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "idtools_unix.go", - "usergroupadd_unsupported.go", - "utils_unix.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "idtools_unix.go", - "usergroupadd_unsupported.go", - "utils_unix.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "idtools_unix.go", - "usergroupadd_unsupported.go", - "utils_unix.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "idtools_windows.go", - "usergroupadd_unsupported.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/docker/docker/pkg/idtools", - visibility = ["//visibility:public"], - deps = select({ - "@io_bazel_rules_go//go/platform:android": [ - "//vendor/github.com/docker/docker/pkg/system:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "//vendor/github.com/docker/docker/pkg/system:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//vendor/github.com/docker/docker/pkg/system:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/github.com/docker/docker/pkg/system:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/docker/docker/pkg/system:go_default_library", - 
"//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "//vendor/github.com/docker/docker/pkg/system:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//vendor/github.com/docker/docker/pkg/system:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//vendor/github.com/docker/docker/pkg/system:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "//vendor/github.com/docker/docker/pkg/system:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "//vendor/github.com/docker/docker/pkg/system:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/docker/docker/pkg/system:go_default_library", - ], - "//conditions:default": [], - }), -) - -go_test( - name = "go_default_test", - srcs = select({ - "@io_bazel_rules_go//go/platform:android": [ - "idtools_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "idtools_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "idtools_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "idtools_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "idtools_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "idtools_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "idtools_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "idtools_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "idtools_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "idtools_unix_test.go", - ], - "//conditions:default": [], - }), - embed = [":go_default_library"], - importpath = "github.com/docker/docker/pkg/idtools", - deps = select({ - "@io_bazel_rules_go//go/platform:android": [ - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert/cmp:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/skip:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert/cmp:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/skip:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert/cmp:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/skip:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert/cmp:go_default_library", - 
"//vendor/github.com/gotestyourself/gotestyourself/skip:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert/cmp:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/skip:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert/cmp:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/skip:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert/cmp:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/skip:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert/cmp:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/skip:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert/cmp:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/skip:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert/cmp:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/skip:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go index e2b49315..230422ea 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -30,30 +30,30 @@ func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } const ( - subuidFileName string = "/etc/subuid" - subgidFileName string = "/etc/subgid" + subuidFileName = "/etc/subuid" + subgidFileName = "/etc/subgid" ) // MkdirAllAndChown creates a directory (include any along the path) and then modifies // ownership to the requested uid/gid. If the directory already exists, this // function will still change ownership to the requested uid/gid pair. -func MkdirAllAndChown(path string, mode os.FileMode, owner IDPair) error { - return mkdirAs(path, mode, owner.UID, owner.GID, true, true) +func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, true, true) } // MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. // If the directory already exists, this function still changes ownership. 
// Note that unlike os.Mkdir(), this function does not return IsExist error // in case path already exists. -func MkdirAndChown(path string, mode os.FileMode, owner IDPair) error { - return mkdirAs(path, mode, owner.UID, owner.GID, false, true) +func MkdirAndChown(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, false, true) } // MkdirAllAndChownNew creates a directory (include any along the path) and then modifies // ownership ONLY of newly created directories to the requested uid/gid. If the // directories along the path exist, no change of ownership will be performed -func MkdirAllAndChownNew(path string, mode os.FileMode, owner IDPair) error { - return mkdirAs(path, mode, owner.UID, owner.GID, true, false) +func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, true, false) } // GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. @@ -102,22 +102,23 @@ func toHost(contID int, idMap []IDMap) (int, error) { return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) } -// IDPair is a UID and GID pair -type IDPair struct { +// Identity is either a UID and GID pair or a SID (but not both) +type Identity struct { UID int GID int + SID string } -// IDMappings contains a mappings of UIDs and GIDs -type IDMappings struct { +// IdentityMapping contains a mappings of UIDs and GIDs +type IdentityMapping struct { uids []IDMap gids []IDMap } -// NewIDMappings takes a requested user and group name and +// NewIdentityMapping takes a requested user and group name and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair -func NewIDMappings(username, groupname string) (*IDMappings, error) { +func NewIdentityMapping(username, groupname string) (*IdentityMapping, error) { subuidRanges, err := parseSubuid(username) if err != nil { return nil, err @@ -133,7 +134,7 @@ func NewIDMappings(username, groupname string) (*IDMappings, error) { return nil, fmt.Errorf("No subgid ranges found for group %q", groupname) } - return &IDMappings{ + return &IdentityMapping{ uids: createIDMap(subuidRanges), gids: createIDMap(subgidRanges), }, nil @@ -141,21 +142,21 @@ func NewIDMappings(username, groupname string) (*IDMappings, error) { // NewIDMappingsFromMaps creates a new mapping from two slices // Deprecated: this is a temporary shim while transitioning to IDMapping -func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings { - return &IDMappings{uids: uids, gids: gids} +func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IdentityMapping { + return &IdentityMapping{uids: uids, gids: gids} } // RootPair returns a uid and gid pair for the root user. The error is ignored // because a root user always exists, and the defaults are correct when the uid // and gid maps are empty. -func (i *IDMappings) RootPair() IDPair { +func (i *IdentityMapping) RootPair() Identity { uid, gid, _ := GetRootUIDGID(i.uids, i.gids) - return IDPair{UID: uid, GID: gid} + return Identity{UID: uid, GID: gid} } // ToHost returns the host UID and GID for the container uid, gid. 
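The idtools hunk above renames IDPair to Identity (adding an optional SID field) and IDMappings to IdentityMapping, and the Mkdir*AndChown helpers now take an Identity instead of a UID/GID pair. A minimal migration sketch for a caller, using only identifiers shown in this hunk; the path and the UID/GID values are hypothetical placeholders.

// Before (signature removed in this change):
//   idtools.MkdirAllAndChown(path, 0755, idtools.IDPair{UID: 1000, GID: 1000})
// After:
package main

import (
	"log"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// Identity replaces IDPair; UID and GID keep their meaning, and the
	// new SID field is optional (left empty here).
	owner := idtools.Identity{UID: 1000, GID: 1000}

	// Illustrative path only; MkdirAllAndChown creates the directory chain
	// and chowns it to the requested owner, per the doc comment above.
	if err := idtools.MkdirAllAndChown("/var/lib/example", 0755, owner); err != nil {
		log.Fatal(err)
	}
}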
// Remapping is only performed if the ids aren't already the remapped root ids -func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { +func (i *IdentityMapping) ToHost(pair Identity) (Identity, error) { var err error target := i.RootPair() @@ -173,7 +174,7 @@ func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { } // ToContainer returns the container UID and GID for the host uid and gid -func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { +func (i *IdentityMapping) ToContainer(pair Identity) (int, int, error) { uid, err := toContainer(pair.UID, i.uids) if err != nil { return -1, -1, err @@ -183,19 +184,19 @@ func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { } // Empty returns true if there are no id mappings -func (i *IDMappings) Empty() bool { +func (i *IdentityMapping) Empty() bool { return len(i.uids) == 0 && len(i.gids) == 0 } // UIDs return the UID mapping // TODO: remove this once everything has been refactored to use pairs -func (i *IDMappings) UIDs() []IDMap { +func (i *IdentityMapping) UIDs() []IDMap { return i.uids } // GIDs return the UID mapping // TODO: remove this once everything has been refactored to use pairs -func (i *IDMappings) GIDs() []IDMap { +func (i *IdentityMapping) GIDs() []IDMap { return i.gids } diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go index 1d87ea3b..fb239743 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -21,11 +21,12 @@ var ( getentCmd string ) -func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { +func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { // make an array containing the original path asked for, plus (for mkAll == true) // all path components leading up to the complete path that don't exist before we MkdirAll // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't // chown the full directory path if it exists + var paths []string stat, err := system.Stat(path) @@ -38,7 +39,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown } // short-circuit--we were called with an existing directory and chown was requested - return lazyChown(path, ownerUID, ownerGID, stat) + return lazyChown(path, owner.UID, owner.GID, stat) } if os.IsNotExist(err) { @@ -69,7 +70,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown // even if it existed, we will chown the requested path + any subpaths that // didn't exist when we called MkdirAll for _, pathComponent := range paths { - if err := lazyChown(pathComponent, ownerUID, ownerGID, nil); err != nil { + if err := lazyChown(pathComponent, owner.UID, owner.GID, nil); err != nil { return err } } @@ -78,7 +79,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown // CanAccess takes a valid (existing) directory and a uid, gid pair and determines // if that uid, gid pair has access (execute bit) to the directory -func CanAccess(path string, pair IDPair) bool { +func CanAccess(path string, pair Identity) bool { statInfo, err := system.Stat(path) if err != nil { return false diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go deleted file mode 100644 index e493b9e8..00000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go +++ /dev/null @@ -1,397 +0,0 @@ -// +build !windows - -package idtools // import "github.com/docker/docker/pkg/idtools" - -import ( - "fmt" - "io/ioutil" - "os" - "os/user" - "path/filepath" - "testing" - - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "github.com/gotestyourself/gotestyourself/skip" - "golang.org/x/sys/unix" -) - -const ( - tempUser = "tempuser" -) - -type node struct { - uid int - gid int -} - -func TestMkdirAllAndChown(t *testing.T) { - RequiresRoot(t) - dirName, err := ioutil.TempDir("", "mkdirall") - if err != nil { - t.Fatalf("Couldn't create temp dir: %v", err) - } - defer os.RemoveAll(dirName) - - testTree := map[string]node{ - "usr": {0, 0}, - "usr/bin": {0, 0}, - "lib": {33, 33}, - "lib/x86_64": {45, 45}, - "lib/x86_64/share": {1, 1}, - } - - if err := buildTree(dirName, testTree); err != nil { - t.Fatal(err) - } - - // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid - if err := MkdirAllAndChown(filepath.Join(dirName, "usr", "share"), 0755, IDPair{UID: 99, GID: 99}); err != nil { - t.Fatal(err) - } - testTree["usr/share"] = node{99, 99} - verifyTree, err := readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } - - // test 2-deep new directories--both should be owned by the uid/gid pair - if err := MkdirAllAndChown(filepath.Join(dirName, "lib", "some", "other"), 0755, IDPair{UID: 101, GID: 101}); err != nil { - t.Fatal(err) - } - testTree["lib/some"] = node{101, 101} - testTree["lib/some/other"] = node{101, 101} - verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } - - // test a directory that already exists; should be chowned, but nothing else - if err := MkdirAllAndChown(filepath.Join(dirName, "usr"), 0755, IDPair{UID: 102, GID: 102}); err != nil { - 
t.Fatal(err) - } - testTree["usr"] = node{102, 102} - verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } -} - -func TestMkdirAllAndChownNew(t *testing.T) { - RequiresRoot(t) - dirName, err := ioutil.TempDir("", "mkdirnew") - assert.NilError(t, err) - defer os.RemoveAll(dirName) - - testTree := map[string]node{ - "usr": {0, 0}, - "usr/bin": {0, 0}, - "lib": {33, 33}, - "lib/x86_64": {45, 45}, - "lib/x86_64/share": {1, 1}, - } - assert.NilError(t, buildTree(dirName, testTree)) - - // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid - err = MkdirAllAndChownNew(filepath.Join(dirName, "usr", "share"), 0755, IDPair{UID: 99, GID: 99}) - assert.NilError(t, err) - - testTree["usr/share"] = node{99, 99} - verifyTree, err := readTree(dirName, "") - assert.NilError(t, err) - assert.NilError(t, compareTrees(testTree, verifyTree)) - - // test 2-deep new directories--both should be owned by the uid/gid pair - err = MkdirAllAndChownNew(filepath.Join(dirName, "lib", "some", "other"), 0755, IDPair{UID: 101, GID: 101}) - assert.NilError(t, err) - testTree["lib/some"] = node{101, 101} - testTree["lib/some/other"] = node{101, 101} - verifyTree, err = readTree(dirName, "") - assert.NilError(t, err) - assert.NilError(t, compareTrees(testTree, verifyTree)) - - // test a directory that already exists; should NOT be chowned - err = MkdirAllAndChownNew(filepath.Join(dirName, "usr"), 0755, IDPair{UID: 102, GID: 102}) - assert.NilError(t, err) - verifyTree, err = readTree(dirName, "") - assert.NilError(t, err) - assert.NilError(t, compareTrees(testTree, verifyTree)) -} - -func TestMkdirAndChown(t *testing.T) { - RequiresRoot(t) - dirName, err := ioutil.TempDir("", "mkdir") - if err != nil { - t.Fatalf("Couldn't create temp dir: %v", err) - } - defer os.RemoveAll(dirName) - - testTree := map[string]node{ - "usr": {0, 0}, - } - if err := buildTree(dirName, testTree); err != nil { - t.Fatal(err) - } - - // test a directory that already exists; should just chown to the requested uid/gid - if err := MkdirAndChown(filepath.Join(dirName, "usr"), 0755, IDPair{UID: 99, GID: 99}); err != nil { - t.Fatal(err) - } - testTree["usr"] = node{99, 99} - verifyTree, err := readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } - - // create a subdir under a dir which doesn't exist--should fail - if err := MkdirAndChown(filepath.Join(dirName, "usr", "bin", "subdir"), 0755, IDPair{UID: 102, GID: 102}); err == nil { - t.Fatalf("Trying to create a directory with Mkdir where the parent doesn't exist should have failed") - } - - // create a subdir under an existing dir; should only change the ownership of the new subdir - if err := MkdirAndChown(filepath.Join(dirName, "usr", "bin"), 0755, IDPair{UID: 102, GID: 102}); err != nil { - t.Fatal(err) - } - testTree["usr/bin"] = node{102, 102} - verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } -} - -func buildTree(base string, tree map[string]node) error { - for path, node := range tree { - fullPath := filepath.Join(base, path) - if err := os.MkdirAll(fullPath, 0755); err != nil { - return fmt.Errorf("Couldn't create path: %s; error: %v", fullPath, err) - } - if err := os.Chown(fullPath, node.uid, node.gid); err != nil { - return fmt.Errorf("Couldn't chown path: %s; 
error: %v", fullPath, err) - } - } - return nil -} - -func readTree(base, root string) (map[string]node, error) { - tree := make(map[string]node) - - dirInfos, err := ioutil.ReadDir(base) - if err != nil { - return nil, fmt.Errorf("Couldn't read directory entries for %q: %v", base, err) - } - - for _, info := range dirInfos { - s := &unix.Stat_t{} - if err := unix.Stat(filepath.Join(base, info.Name()), s); err != nil { - return nil, fmt.Errorf("Can't stat file %q: %v", filepath.Join(base, info.Name()), err) - } - tree[filepath.Join(root, info.Name())] = node{int(s.Uid), int(s.Gid)} - if info.IsDir() { - // read the subdirectory - subtree, err := readTree(filepath.Join(base, info.Name()), filepath.Join(root, info.Name())) - if err != nil { - return nil, err - } - for path, nodeinfo := range subtree { - tree[path] = nodeinfo - } - } - } - return tree, nil -} - -func compareTrees(left, right map[string]node) error { - if len(left) != len(right) { - return fmt.Errorf("Trees aren't the same size") - } - for path, nodeLeft := range left { - if nodeRight, ok := right[path]; ok { - if nodeRight.uid != nodeLeft.uid || nodeRight.gid != nodeLeft.gid { - // mismatch - return fmt.Errorf("mismatched ownership for %q: expected: %d:%d, got: %d:%d", path, - nodeLeft.uid, nodeLeft.gid, nodeRight.uid, nodeRight.gid) - } - continue - } - return fmt.Errorf("right tree didn't contain path %q", path) - } - return nil -} - -func delUser(t *testing.T, name string) { - _, err := execCmd("userdel", name) - assert.Check(t, err) -} - -func TestParseSubidFileWithNewlinesAndComments(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "parsesubid") - if err != nil { - t.Fatal(err) - } - fnamePath := filepath.Join(tmpDir, "testsubuid") - fcontent := `tss:100000:65536 -# empty default subuid/subgid file - -dockremap:231072:65536` - if err := ioutil.WriteFile(fnamePath, []byte(fcontent), 0644); err != nil { - t.Fatal(err) - } - ranges, err := parseSubidFile(fnamePath, "dockremap") - if err != nil { - t.Fatal(err) - } - if len(ranges) != 1 { - t.Fatalf("wanted 1 element in ranges, got %d instead", len(ranges)) - } - if ranges[0].Start != 231072 { - t.Fatalf("wanted 231072, got %d instead", ranges[0].Start) - } - if ranges[0].Length != 65536 { - t.Fatalf("wanted 65536, got %d instead", ranges[0].Length) - } -} - -func TestGetRootUIDGID(t *testing.T) { - uidMap := []IDMap{ - { - ContainerID: 0, - HostID: os.Getuid(), - Size: 1, - }, - } - gidMap := []IDMap{ - { - ContainerID: 0, - HostID: os.Getgid(), - Size: 1, - }, - } - - uid, gid, err := GetRootUIDGID(uidMap, gidMap) - assert.Check(t, err) - assert.Check(t, is.Equal(os.Getegid(), uid)) - assert.Check(t, is.Equal(os.Getegid(), gid)) - - uidMapError := []IDMap{ - { - ContainerID: 1, - HostID: os.Getuid(), - Size: 1, - }, - } - _, _, err = GetRootUIDGID(uidMapError, gidMap) - assert.Check(t, is.Error(err, "Container ID 0 cannot be mapped to a host ID")) -} - -func TestToContainer(t *testing.T) { - uidMap := []IDMap{ - { - ContainerID: 2, - HostID: 2, - Size: 1, - }, - } - - containerID, err := toContainer(2, uidMap) - assert.Check(t, err) - assert.Check(t, is.Equal(uidMap[0].ContainerID, containerID)) -} - -func TestNewIDMappings(t *testing.T) { - RequiresRoot(t) - _, _, err := AddNamespaceRangesUser(tempUser) - assert.Check(t, err) - defer delUser(t, tempUser) - - tempUser, err := user.Lookup(tempUser) - assert.Check(t, err) - - gids, err := tempUser.GroupIds() - assert.Check(t, err) - group, err := user.LookupGroupId(string(gids[0])) - assert.Check(t, err) - - idMappings, 
err := NewIDMappings(tempUser.Username, group.Name) - assert.Check(t, err) - - rootUID, rootGID, err := GetRootUIDGID(idMappings.UIDs(), idMappings.GIDs()) - assert.Check(t, err) - - dirName, err := ioutil.TempDir("", "mkdirall") - assert.Check(t, err, "Couldn't create temp directory") - defer os.RemoveAll(dirName) - - err = MkdirAllAndChown(dirName, 0700, IDPair{UID: rootUID, GID: rootGID}) - assert.Check(t, err, "Couldn't change ownership of file path. Got error") - assert.Check(t, CanAccess(dirName, idMappings.RootPair()), fmt.Sprintf("Unable to access %s directory with user UID:%d and GID:%d", dirName, rootUID, rootGID)) -} - -func TestLookupUserAndGroup(t *testing.T) { - RequiresRoot(t) - uid, gid, err := AddNamespaceRangesUser(tempUser) - assert.Check(t, err) - defer delUser(t, tempUser) - - fetchedUser, err := LookupUser(tempUser) - assert.Check(t, err) - - fetchedUserByID, err := LookupUID(uid) - assert.Check(t, err) - assert.Check(t, is.DeepEqual(fetchedUserByID, fetchedUser)) - - fetchedGroup, err := LookupGroup(tempUser) - assert.Check(t, err) - - fetchedGroupByID, err := LookupGID(gid) - assert.Check(t, err) - assert.Check(t, is.DeepEqual(fetchedGroupByID, fetchedGroup)) -} - -func TestLookupUserAndGroupThatDoesNotExist(t *testing.T) { - fakeUser := "fakeuser" - _, err := LookupUser(fakeUser) - assert.Check(t, is.Error(err, "getent unable to find entry \""+fakeUser+"\" in passwd database")) - - _, err = LookupUID(-1) - assert.Check(t, is.ErrorContains(err, "")) - - fakeGroup := "fakegroup" - _, err = LookupGroup(fakeGroup) - assert.Check(t, is.Error(err, "getent unable to find entry \""+fakeGroup+"\" in group database")) - - _, err = LookupGID(-1) - assert.Check(t, is.ErrorContains(err, "")) -} - -// TestMkdirIsNotDir checks that mkdirAs() function (used by MkdirAll...) -// returns a correct error in case a directory which it is about to create -// already exists but is a file (rather than a directory). -func TestMkdirIsNotDir(t *testing.T) { - file, err := ioutil.TempFile("", t.Name()) - if err != nil { - t.Fatalf("Couldn't create temp dir: %v", err) - } - defer os.Remove(file.Name()) - - err = mkdirAs(file.Name(), 0755, 0, 0, false, false) - assert.Check(t, is.Error(err, "mkdir "+file.Name()+": not a directory")) -} - -func RequiresRoot(t *testing.T) { - skip.IfCondition(t, os.Getuid() != 0, "skipping test that requires root") -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go index d72cc289..4ae38a1b 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -6,9 +6,11 @@ import ( "github.com/docker/docker/pkg/system" ) -// Platforms such as Windows do not support the UID/GID concept. So make this -// just a wrapper around system.MkdirAll. -func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { +// This is currently a wrapper around MkdirAll, however, since currently +// permissions aren't set through this path, the identity isn't utilized. +// Ownership is handled elsewhere, but in the future could be support here +// too. 
+func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { if err := system.MkdirAll(path, mode, ""); err != nil { return err } @@ -18,6 +20,6 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown // CanAccess takes a valid (existing) directory and a uid, gid pair and determines // if that uid, gid pair has access (execute bit) to the directory // Windows does not require/support this function, so always return true -func CanAccess(path string, pair IDPair) bool { +func CanAccess(path string, identity Identity) bool { return true } diff --git a/vendor/github.com/docker/docker/pkg/ioutils/BUILD.bazel b/vendor/github.com/docker/docker/pkg/ioutils/BUILD.bazel deleted file mode 100644 index b6919dab..00000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/BUILD.bazel +++ /dev/null @@ -1,76 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "buffer.go", - "bytespipe.go", - "fswriters.go", - "readers.go", - "writeflusher.go", - "writers.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "temp_unix.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "temp_unix.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "temp_unix.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "temp_unix.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "temp_unix.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "temp_unix.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "temp_unix.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "temp_unix.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "temp_unix.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "temp_unix.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "temp_windows.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/docker/docker/pkg/ioutils", - visibility = ["//visibility:public"], - deps = [ - "//vendor/golang.org/x/net/context:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/docker/docker/pkg/longpath:go_default_library", - ], - "//conditions:default": [], - }), -) - -go_test( - name = "go_default_test", - srcs = [ - "buffer_test.go", - "bytespipe_test.go", - "fswriters_test.go", - "readers_test.go", - "writers_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/docker/docker/pkg/ioutils", - deps = [ - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert/cmp:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - ], -) diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer_test.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer_test.go deleted file mode 100644 index b8887bfd..00000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/buffer_test.go +++ /dev/null @@ -1,153 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "bytes" - "testing" -) - -func TestFixedBufferCap(t *testing.T) { - buf := &fixedBuffer{buf: make([]byte, 0, 5)} - - n := buf.Cap() - if n != 5 { - t.Fatalf("expected buffer capacity to be 5 bytes, got %d", n) - } -} - -func TestFixedBufferLen(t *testing.T) { - buf := &fixedBuffer{buf: make([]byte, 0, 10)} - - buf.Write([]byte("hello")) - l := buf.Len() - if l != 5 { - t.Fatalf("expected buffer length to be 5 bytes, got %d", l) - } - 
- buf.Write([]byte("world")) - l = buf.Len() - if l != 10 { - t.Fatalf("expected buffer length to be 10 bytes, got %d", l) - } - - // read 5 bytes - b := make([]byte, 5) - buf.Read(b) - - l = buf.Len() - if l != 5 { - t.Fatalf("expected buffer length to be 5 bytes, got %d", l) - } - - n, err := buf.Write([]byte("i-wont-fit")) - if n != 0 { - t.Fatalf("expected no bytes to be written to buffer, got %d", n) - } - if err != errBufferFull { - t.Fatalf("expected errBufferFull, got %v", err) - } - - l = buf.Len() - if l != 5 { - t.Fatalf("expected buffer length to still be 5 bytes, got %d", l) - } - - buf.Reset() - l = buf.Len() - if l != 0 { - t.Fatalf("expected buffer length to still be 0 bytes, got %d", l) - } -} - -func TestFixedBufferString(t *testing.T) { - buf := &fixedBuffer{buf: make([]byte, 0, 10)} - - buf.Write([]byte("hello")) - buf.Write([]byte("world")) - - out := buf.String() - if out != "helloworld" { - t.Fatalf("expected output to be \"helloworld\", got %q", out) - } - - // read 5 bytes - b := make([]byte, 5) - buf.Read(b) - - // test that fixedBuffer.String() only returns the part that hasn't been read - out = buf.String() - if out != "world" { - t.Fatalf("expected output to be \"world\", got %q", out) - } -} - -func TestFixedBufferWrite(t *testing.T) { - buf := &fixedBuffer{buf: make([]byte, 0, 64)} - n, err := buf.Write([]byte("hello")) - if err != nil { - t.Fatal(err) - } - - if n != 5 { - t.Fatalf("expected 5 bytes written, got %d", n) - } - - if string(buf.buf[:5]) != "hello" { - t.Fatalf("expected \"hello\", got %q", string(buf.buf[:5])) - } - - n, err = buf.Write(bytes.Repeat([]byte{1}, 64)) - if n != 59 { - t.Fatalf("expected 59 bytes written before buffer is full, got %d", n) - } - if err != errBufferFull { - t.Fatalf("expected errBufferFull, got %v - %v", err, buf.buf[:64]) - } -} - -func TestFixedBufferRead(t *testing.T) { - buf := &fixedBuffer{buf: make([]byte, 0, 64)} - if _, err := buf.Write([]byte("hello world")); err != nil { - t.Fatal(err) - } - - b := make([]byte, 5) - n, err := buf.Read(b) - if err != nil { - t.Fatal(err) - } - - if n != 5 { - t.Fatalf("expected 5 bytes read, got %d - %s", n, buf.String()) - } - - if string(b) != "hello" { - t.Fatalf("expected \"hello\", got %q", string(b)) - } - - n, err = buf.Read(b) - if err != nil { - t.Fatal(err) - } - - if n != 5 { - t.Fatalf("expected 5 bytes read, got %d", n) - } - - if string(b) != " worl" { - t.Fatalf("expected \" worl\", got %s", string(b)) - } - - b = b[:1] - n, err = buf.Read(b) - if err != nil { - t.Fatal(err) - } - - if n != 1 { - t.Fatalf("expected 1 byte read, got %d - %s", n, buf.String()) - } - - if string(b) != "d" { - t.Fatalf("expected \"d\", got %s", string(b)) - } -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go deleted file mode 100644 index 9101f20a..00000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "crypto/sha1" - "encoding/hex" - "math/rand" - "testing" - "time" -) - -func TestBytesPipeRead(t *testing.T) { - buf := NewBytesPipe() - buf.Write([]byte("12")) - buf.Write([]byte("34")) - buf.Write([]byte("56")) - buf.Write([]byte("78")) - buf.Write([]byte("90")) - rd := make([]byte, 4) - n, err := buf.Read(rd) - if err != nil { - t.Fatal(err) - } - if n != 4 { - t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) - } - if string(rd) != "1234" { - 
t.Fatalf("Read %s, but must be %s", rd, "1234") - } - n, err = buf.Read(rd) - if err != nil { - t.Fatal(err) - } - if n != 4 { - t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) - } - if string(rd) != "5678" { - t.Fatalf("Read %s, but must be %s", rd, "5679") - } - n, err = buf.Read(rd) - if err != nil { - t.Fatal(err) - } - if n != 2 { - t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 2) - } - if string(rd[:n]) != "90" { - t.Fatalf("Read %s, but must be %s", rd, "90") - } -} - -func TestBytesPipeWrite(t *testing.T) { - buf := NewBytesPipe() - buf.Write([]byte("12")) - buf.Write([]byte("34")) - buf.Write([]byte("56")) - buf.Write([]byte("78")) - buf.Write([]byte("90")) - if buf.buf[0].String() != "1234567890" { - t.Fatalf("Buffer %q, must be %q", buf.buf[0].String(), "1234567890") - } -} - -// Write and read in different speeds/chunk sizes and check valid data is read. -func TestBytesPipeWriteRandomChunks(t *testing.T) { - cases := []struct{ iterations, writesPerLoop, readsPerLoop int }{ - {100, 10, 1}, - {1000, 10, 5}, - {1000, 100, 0}, - {1000, 5, 6}, - {10000, 50, 25}, - } - - testMessage := []byte("this is a random string for testing") - // random slice sizes to read and write - writeChunks := []int{25, 35, 15, 20} - readChunks := []int{5, 45, 20, 25} - - for _, c := range cases { - // first pass: write directly to hash - hash := sha1.New() - for i := 0; i < c.iterations*c.writesPerLoop; i++ { - if _, err := hash.Write(testMessage[:writeChunks[i%len(writeChunks)]]); err != nil { - t.Fatal(err) - } - } - expected := hex.EncodeToString(hash.Sum(nil)) - - // write/read through buffer - buf := NewBytesPipe() - hash.Reset() - - done := make(chan struct{}) - - go func() { - // random delay before read starts - <-time.After(time.Duration(rand.Intn(10)) * time.Millisecond) - for i := 0; ; i++ { - p := make([]byte, readChunks[(c.iterations*c.readsPerLoop+i)%len(readChunks)]) - n, _ := buf.Read(p) - if n == 0 { - break - } - hash.Write(p[:n]) - } - - close(done) - }() - - for i := 0; i < c.iterations; i++ { - for w := 0; w < c.writesPerLoop; w++ { - buf.Write(testMessage[:writeChunks[(i*c.writesPerLoop+w)%len(writeChunks)]]) - } - } - buf.Close() - <-done - - actual := hex.EncodeToString(hash.Sum(nil)) - - if expected != actual { - t.Fatalf("BytesPipe returned invalid data. 
Expected checksum %v, got %v", expected, actual) - } - - } -} - -func BenchmarkBytesPipeWrite(b *testing.B) { - testData := []byte("pretty short line, because why not?") - for i := 0; i < b.N; i++ { - readBuf := make([]byte, 1024) - buf := NewBytesPipe() - go func() { - var err error - for err == nil { - _, err = buf.Read(readBuf) - } - }() - for j := 0; j < 1000; j++ { - buf.Write(testData) - } - buf.Close() - } -} - -func BenchmarkBytesPipeRead(b *testing.B) { - rd := make([]byte, 512) - for i := 0; i < b.N; i++ { - b.StopTimer() - buf := NewBytesPipe() - for j := 0; j < 500; j++ { - buf.Write(make([]byte, 1024)) - } - b.StartTimer() - for j := 0; j < 1000; j++ { - if n, _ := buf.Read(rd); n != 512 { - b.Fatalf("Wrong number of bytes: %d", n) - } - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters_test.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters_test.go deleted file mode 100644 index b283045d..00000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/fswriters_test.go +++ /dev/null @@ -1,132 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "bytes" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "testing" -) - -var ( - testMode os.FileMode = 0640 -) - -func init() { - // Windows does not support full Linux file mode - if runtime.GOOS == "windows" { - testMode = 0666 - } -} - -func TestAtomicWriteToFile(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "atomic-writers-test") - if err != nil { - t.Fatalf("Error when creating temporary directory: %s", err) - } - defer os.RemoveAll(tmpDir) - - expected := []byte("barbaz") - if err := AtomicWriteFile(filepath.Join(tmpDir, "foo"), expected, testMode); err != nil { - t.Fatalf("Error writing to file: %v", err) - } - - actual, err := ioutil.ReadFile(filepath.Join(tmpDir, "foo")) - if err != nil { - t.Fatalf("Error reading from file: %v", err) - } - - if !bytes.Equal(actual, expected) { - t.Fatalf("Data mismatch, expected %q, got %q", expected, actual) - } - - st, err := os.Stat(filepath.Join(tmpDir, "foo")) - if err != nil { - t.Fatalf("Error statting file: %v", err) - } - if expected := os.FileMode(testMode); st.Mode() != expected { - t.Fatalf("Mode mismatched, expected %o, got %o", expected, st.Mode()) - } -} - -func TestAtomicWriteSetCommit(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "atomic-writerset-test") - if err != nil { - t.Fatalf("Error when creating temporary directory: %s", err) - } - defer os.RemoveAll(tmpDir) - - if err := os.Mkdir(filepath.Join(tmpDir, "tmp"), 0700); err != nil { - t.Fatalf("Error creating tmp directory: %s", err) - } - - targetDir := filepath.Join(tmpDir, "target") - ws, err := NewAtomicWriteSet(filepath.Join(tmpDir, "tmp")) - if err != nil { - t.Fatalf("Error creating atomic write set: %s", err) - } - - expected := []byte("barbaz") - if err := ws.WriteFile("foo", expected, testMode); err != nil { - t.Fatalf("Error writing to file: %v", err) - } - - if _, err := ioutil.ReadFile(filepath.Join(targetDir, "foo")); err == nil { - t.Fatalf("Expected error reading file where should not exist") - } - - if err := ws.Commit(targetDir); err != nil { - t.Fatalf("Error committing file: %s", err) - } - - actual, err := ioutil.ReadFile(filepath.Join(targetDir, "foo")) - if err != nil { - t.Fatalf("Error reading from file: %v", err) - } - - if !bytes.Equal(actual, expected) { - t.Fatalf("Data mismatch, expected %q, got %q", expected, actual) - } - - st, err := os.Stat(filepath.Join(targetDir, "foo")) - if err != nil { - t.Fatalf("Error 
statting file: %v", err) - } - if expected := os.FileMode(testMode); st.Mode() != expected { - t.Fatalf("Mode mismatched, expected %o, got %o", expected, st.Mode()) - } - -} - -func TestAtomicWriteSetCancel(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "atomic-writerset-test") - if err != nil { - t.Fatalf("Error when creating temporary directory: %s", err) - } - defer os.RemoveAll(tmpDir) - - if err := os.Mkdir(filepath.Join(tmpDir, "tmp"), 0700); err != nil { - t.Fatalf("Error creating tmp directory: %s", err) - } - - ws, err := NewAtomicWriteSet(filepath.Join(tmpDir, "tmp")) - if err != nil { - t.Fatalf("Error creating atomic write set: %s", err) - } - - expected := []byte("barbaz") - if err := ws.WriteFile("foo", expected, testMode); err != nil { - t.Fatalf("Error writing to file: %v", err) - } - - if err := ws.Cancel(); err != nil { - t.Fatalf("Error committing file: %s", err) - } - - if _, err := ioutil.ReadFile(filepath.Join(tmpDir, "target", "foo")); err == nil { - t.Fatalf("Expected error reading file where should not exist") - } else if !os.IsNotExist(err) { - t.Fatalf("Unexpected error reading file: %s", err) - } -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go index 72f7f231..1f657bd3 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -1,11 +1,10 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( + "context" "crypto/sha256" "encoding/hex" "io" - - "golang.org/x/net/context" ) // ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go b/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go deleted file mode 100644 index e009ab26..00000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "fmt" - "io/ioutil" - "strings" - "testing" - "time" - - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "golang.org/x/net/context" -) - -// Implement io.Reader -type errorReader struct{} - -func (r *errorReader) Read(p []byte) (int, error) { - return 0, fmt.Errorf("error reader always fail") -} - -func TestReadCloserWrapperClose(t *testing.T) { - reader := strings.NewReader("A string reader") - wrapper := NewReadCloserWrapper(reader, func() error { - return fmt.Errorf("This will be called when closing") - }) - err := wrapper.Close() - if err == nil || !strings.Contains(err.Error(), "This will be called when closing") { - t.Fatalf("readCloserWrapper should have call the anonymous func and thus, fail.") - } -} - -func TestReaderErrWrapperReadOnError(t *testing.T) { - called := false - reader := &errorReader{} - wrapper := NewReaderErrWrapper(reader, func() { - called = true - }) - _, err := wrapper.Read([]byte{}) - assert.Check(t, is.Error(err, "error reader always fail")) - if !called { - t.Fatalf("readErrWrapper should have call the anonymous function on failure") - } -} - -func TestReaderErrWrapperRead(t *testing.T) { - reader := strings.NewReader("a string reader.") - wrapper := NewReaderErrWrapper(reader, func() { - t.Fatalf("readErrWrapper should not have called the anonymous function") - }) - // Read 20 byte (should be ok with the string above) - num, err := wrapper.Read(make([]byte, 20)) - if err != nil { - t.Fatal(err) - } 
- if num != 16 { - t.Fatalf("readerErrWrapper should have read 16 byte, but read %d", num) - } -} - -func TestHashData(t *testing.T) { - reader := strings.NewReader("hash-me") - actual, err := HashData(reader) - if err != nil { - t.Fatal(err) - } - expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa" - if actual != expected { - t.Fatalf("Expecting %s, got %s", expected, actual) - } -} - -type perpetualReader struct{} - -func (p *perpetualReader) Read(buf []byte) (n int, err error) { - for i := 0; i != len(buf); i++ { - buf[i] = 'a' - } - return len(buf), nil -} - -func TestCancelReadCloser(t *testing.T) { - ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) - cancelReadCloser := NewCancelReadCloser(ctx, ioutil.NopCloser(&perpetualReader{})) - for { - var buf [128]byte - _, err := cancelReadCloser.Read(buf[:]) - if err == context.DeadlineExceeded { - break - } else if err != nil { - t.Fatalf("got unexpected error: %v", err) - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers_test.go b/vendor/github.com/docker/docker/pkg/ioutils/writers_test.go deleted file mode 100644 index 94d446f9..00000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/writers_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "bytes" - "strings" - "testing" -) - -func TestWriteCloserWrapperClose(t *testing.T) { - called := false - writer := bytes.NewBuffer([]byte{}) - wrapper := NewWriteCloserWrapper(writer, func() error { - called = true - return nil - }) - if err := wrapper.Close(); err != nil { - t.Fatal(err) - } - if !called { - t.Fatalf("writeCloserWrapper should have call the anonymous function.") - } -} - -func TestNopWriteCloser(t *testing.T) { - writer := bytes.NewBuffer([]byte{}) - wrapper := NopWriteCloser(writer) - if err := wrapper.Close(); err != nil { - t.Fatal("NopWriteCloser always return nil on Close.") - } - -} - -func TestNopWriter(t *testing.T) { - nw := &NopWriter{} - l, err := nw.Write([]byte{'c'}) - if err != nil { - t.Fatal(err) - } - if l != 1 { - t.Fatalf("Expected 1 got %d", l) - } -} - -func TestWriteCounter(t *testing.T) { - dummy1 := "This is a dummy string." - dummy2 := "This is another dummy string." - totalLength := int64(len(dummy1) + len(dummy2)) - - reader1 := strings.NewReader(dummy1) - reader2 := strings.NewReader(dummy2) - - var buffer bytes.Buffer - wc := NewWriteCounter(&buffer) - - reader1.WriteTo(wc) - reader2.WriteTo(wc) - - if wc.Count != totalLength { - t.Errorf("Wrong count: %d vs. 
%d", wc.Count, totalLength) - } - - if buffer.String() != dummy1+dummy2 { - t.Error("Wrong message written") - } -} diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/BUILD.bazel b/vendor/github.com/docker/docker/pkg/jsonmessage/BUILD.bazel deleted file mode 100644 index b02b612a..00000000 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = ["jsonmessage.go"], - importpath = "github.com/docker/docker/pkg/jsonmessage", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/Nvveen/Gotty:go_default_library", - "//vendor/github.com/docker/docker/pkg/term:go_default_library", - "//vendor/github.com/docker/go-units:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["jsonmessage_test.go"], - embed = [":go_default_library"], - importpath = "github.com/docker/docker/pkg/jsonmessage", - deps = [ - "//vendor/github.com/docker/docker/pkg/term:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert/cmp:go_default_library", - ], -) diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go deleted file mode 100644 index f9ead207..00000000 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go +++ /dev/null @@ -1,298 +0,0 @@ -package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage" - -import ( - "bytes" - "fmt" - "os" - "strings" - "testing" - "time" - - "github.com/docker/docker/pkg/term" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" -) - -func TestError(t *testing.T) { - je := JSONError{404, "Not found"} - assert.Assert(t, is.Error(&je, "Not found")) -} - -func TestProgressString(t *testing.T) { - type expected struct { - short string - long string - } - - shortAndLong := func(short, long string) expected { - return expected{short: short, long: long} - } - - start := time.Date(2017, 12, 3, 15, 10, 1, 0, time.UTC) - timeAfter := func(delta time.Duration) func() time.Time { - return func() time.Time { - return start.Add(delta) - } - } - - var testcases = []struct { - name string - progress JSONProgress - expected expected - }{ - { - name: "no progress", - }, - { - name: "progress 1", - progress: JSONProgress{Current: 1}, - expected: shortAndLong(" 1B", " 1B"), - }, - { - name: "some progress with a start time", - progress: JSONProgress{ - Current: 20, - Total: 100, - Start: start.Unix(), - nowFunc: timeAfter(time.Second), - }, - expected: shortAndLong( - " 20B/100B 4s", - "[==========> ] 20B/100B 4s", - ), - }, - { - name: "some progress without a start time", - progress: JSONProgress{Current: 50, Total: 100}, - expected: shortAndLong( - " 50B/100B", - "[=========================> ] 50B/100B", - ), - }, - { - name: "current more than total is not negative gh#7136", - progress: JSONProgress{Current: 50, Total: 40}, - expected: shortAndLong( - " 50B", - "[==================================================>] 50B", - ), - }, - { - name: "with units", - progress: JSONProgress{Current: 50, Total: 100, Units: "units"}, - expected: shortAndLong( - "50/100 units", - "[=========================> ] 50/100 units", - ), - }, - { - name: "current more than total with units is not negative ", - progress: 
JSONProgress{Current: 50, Total: 40, Units: "units"}, - expected: shortAndLong( - "50 units", - "[==================================================>] 50 units", - ), - }, - { - name: "hide counts", - progress: JSONProgress{Current: 50, Total: 100, HideCounts: true}, - expected: shortAndLong( - "", - "[=========================> ] ", - ), - }, - } - - for _, testcase := range testcases { - t.Run(testcase.name, func(t *testing.T) { - testcase.progress.winSize = 100 - assert.Equal(t, testcase.progress.String(), testcase.expected.short) - - testcase.progress.winSize = 200 - assert.Equal(t, testcase.progress.String(), testcase.expected.long) - }) - } -} - -func TestJSONMessageDisplay(t *testing.T) { - now := time.Now() - messages := map[JSONMessage][]string{ - // Empty - {}: {"\n", "\n"}, - // Status - { - Status: "status", - }: { - "status\n", - "status\n", - }, - // General - { - Time: now.Unix(), - ID: "ID", - From: "From", - Status: "status", - }: { - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(RFC3339NanoFixed)), - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(RFC3339NanoFixed)), - }, - // General, with nano precision time - { - TimeNano: now.UnixNano(), - ID: "ID", - From: "From", - Status: "status", - }: { - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(RFC3339NanoFixed)), - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(RFC3339NanoFixed)), - }, - // General, with both times Nano is preferred - { - Time: now.Unix(), - TimeNano: now.UnixNano(), - ID: "ID", - From: "From", - Status: "status", - }: { - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(RFC3339NanoFixed)), - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(RFC3339NanoFixed)), - }, - // Stream over status - { - Status: "status", - Stream: "stream", - }: { - "stream", - "stream", - }, - // With progress message - { - Status: "status", - ProgressMessage: "progressMessage", - }: { - "status progressMessage", - "status progressMessage", - }, - // With progress, stream empty - { - Status: "status", - Stream: "", - Progress: &JSONProgress{Current: 1}, - }: { - "", - fmt.Sprintf("%c[1K%c[K\rstatus 1B\r", 27, 27), - }, - } - - // The tests :) - for jsonMessage, expectedMessages := range messages { - // Without terminal - data := bytes.NewBuffer([]byte{}) - if err := jsonMessage.Display(data, nil); err != nil { - t.Fatal(err) - } - if data.String() != expectedMessages[0] { - t.Fatalf("Expected %q,got %q", expectedMessages[0], data.String()) - } - // With terminal - data = bytes.NewBuffer([]byte{}) - if err := jsonMessage.Display(data, &noTermInfo{}); err != nil { - t.Fatal(err) - } - if data.String() != expectedMessages[1] { - t.Fatalf("\nExpected %q\n got %q", expectedMessages[1], data.String()) - } - } -} - -// Test JSONMessage with an Error. It will return an error with the text as error, not the meaning of the HTTP code. 
-func TestJSONMessageDisplayWithJSONError(t *testing.T) { - data := bytes.NewBuffer([]byte{}) - jsonMessage := JSONMessage{Error: &JSONError{404, "Can't find it"}} - - err := jsonMessage.Display(data, &noTermInfo{}) - if err == nil || err.Error() != "Can't find it" { - t.Fatalf("Expected a JSONError 404, got %q", err) - } - - jsonMessage = JSONMessage{Error: &JSONError{401, "Anything"}} - err = jsonMessage.Display(data, &noTermInfo{}) - assert.Check(t, is.Error(err, "authentication is required")) -} - -func TestDisplayJSONMessagesStreamInvalidJSON(t *testing.T) { - var ( - inFd uintptr - ) - data := bytes.NewBuffer([]byte{}) - reader := strings.NewReader("This is not a 'valid' JSON []") - inFd, _ = term.GetFdInfo(reader) - - if err := DisplayJSONMessagesStream(reader, data, inFd, false, nil); err == nil && err.Error()[:17] != "invalid character" { - t.Fatalf("Should have thrown an error (invalid character in ..), got %q", err) - } -} - -func TestDisplayJSONMessagesStream(t *testing.T) { - var ( - inFd uintptr - ) - - messages := map[string][]string{ - // empty string - "": { - "", - ""}, - // Without progress & ID - "{ \"status\": \"status\" }": { - "status\n", - "status\n", - }, - // Without progress, with ID - "{ \"id\": \"ID\",\"status\": \"status\" }": { - "ID: status\n", - fmt.Sprintf("ID: status\n"), - }, - // With progress - "{ \"id\": \"ID\", \"status\": \"status\", \"progress\": \"ProgressMessage\" }": { - "ID: status ProgressMessage", - fmt.Sprintf("\n%c[%dAID: status ProgressMessage%c[%dB", 27, 1, 27, 1), - }, - // With progressDetail - "{ \"id\": \"ID\", \"status\": \"status\", \"progressDetail\": { \"Current\": 1} }": { - "", // progressbar is disabled in non-terminal - fmt.Sprintf("\n%c[%dA%c[1K%c[K\rID: status 1B\r%c[%dB", 27, 1, 27, 27, 27, 1), - }, - } - - // Use $TERM which is unlikely to exist, forcing DisplayJSONMessageStream to - // (hopefully) use &noTermInfo. 
- origTerm := os.Getenv("TERM") - os.Setenv("TERM", "xyzzy-non-existent-terminfo") - - for jsonMessage, expectedMessages := range messages { - data := bytes.NewBuffer([]byte{}) - reader := strings.NewReader(jsonMessage) - inFd, _ = term.GetFdInfo(reader) - - // Without terminal - if err := DisplayJSONMessagesStream(reader, data, inFd, false, nil); err != nil { - t.Fatal(err) - } - if data.String() != expectedMessages[0] { - t.Fatalf("Expected an %q, got %q", expectedMessages[0], data.String()) - } - - // With terminal - data = bytes.NewBuffer([]byte{}) - reader = strings.NewReader(jsonMessage) - if err := DisplayJSONMessagesStream(reader, data, inFd, true, nil); err != nil { - t.Fatal(err) - } - if data.String() != expectedMessages[1] { - t.Fatalf("\nExpected %q\n got %q", expectedMessages[1], data.String()) - } - } - os.Setenv("TERM", origTerm) - -} diff --git a/vendor/github.com/docker/docker/pkg/longpath/BUILD.bazel b/vendor/github.com/docker/docker/pkg/longpath/BUILD.bazel deleted file mode 100644 index 414d280f..00000000 --- a/vendor/github.com/docker/docker/pkg/longpath/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = ["longpath.go"], - importpath = "github.com/docker/docker/pkg/longpath", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = ["longpath_test.go"], - embed = [":go_default_library"], - importpath = "github.com/docker/docker/pkg/longpath", -) diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath_test.go b/vendor/github.com/docker/docker/pkg/longpath/longpath_test.go deleted file mode 100644 index 2bcd008e..00000000 --- a/vendor/github.com/docker/docker/pkg/longpath/longpath_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package longpath // import "github.com/docker/docker/pkg/longpath" - -import ( - "strings" - "testing" -) - -func TestStandardLongPath(t *testing.T) { - c := `C:\simple\path` - longC := AddPrefix(c) - if !strings.EqualFold(longC, `\\?\C:\simple\path`) { - t.Errorf("Wrong long path returned. Original = %s ; Long = %s", c, longC) - } -} - -func TestUNCLongPath(t *testing.T) { - c := `\\server\share\path` - longC := AddPrefix(c) - if !strings.EqualFold(longC, `\\?\UNC\server\share\path`) { - t.Errorf("Wrong UNC long path returned. 
Original = %s ; Long = %s", c, longC) - } -} diff --git a/vendor/github.com/docker/docker/pkg/mount/BUILD.bazel b/vendor/github.com/docker/docker/pkg/mount/BUILD.bazel deleted file mode 100644 index f6c5244e..00000000 --- a/vendor/github.com/docker/docker/pkg/mount/BUILD.bazel +++ /dev/null @@ -1,134 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "flags.go", - "mount.go", - "mountinfo.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "flags_unsupported.go", - "mounter_unsupported.go", - "mountinfo_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "flags_unsupported.go", - "mounter_unsupported.go", - "mountinfo_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "flags_unsupported.go", - "mounter_unsupported.go", - "mountinfo_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "flags_freebsd.go", - "flags_unsupported.go", - "mounter_freebsd.go", - "mounter_unsupported.go", - "mountinfo_freebsd.go", - "mountinfo_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "flags_linux.go", - "mounter_linux.go", - "mountinfo_linux.go", - "sharedsubtree_linux.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "flags_unsupported.go", - "mounter_unsupported.go", - "mountinfo_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "flags_unsupported.go", - "mounter_unsupported.go", - "mountinfo_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "flags_unsupported.go", - "mounter_unsupported.go", - "mountinfo_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "flags_unsupported.go", - "mounter_unsupported.go", - "mountinfo_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "flags_unsupported.go", - "mounter_unsupported.go", - "mountinfo_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "flags_unsupported.go", - "mounter_unsupported.go", - "mountinfo_windows.go", - ], - "//conditions:default": [], - }), - cgo = True, - importpath = "github.com/docker/docker/pkg/mount", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/sirupsen/logrus:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "//conditions:default": [], - }), -) - -go_test( - name = "go_default_test", - srcs = select({ - "@io_bazel_rules_go//go/platform:android": [ - "mount_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "mount_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "mount_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "mount_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "mount_unix_test.go", - "mounter_linux_test.go", - "mountinfo_linux_test.go", - "sharedsubtree_linux_test.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "mount_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "mount_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "mount_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "mount_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "mount_unix_test.go", - ], - "//conditions:default": [], - }), - embed = [":go_default_library"], - importpath = 
"github.com/docker/docker/pkg/mount", - deps = select({ - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/opencontainers/selinux/go-selinux:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go index 7bdbf8a8..874aff65 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mount.go +++ b/vendor/github.com/docker/docker/pkg/mount/mount.go @@ -3,32 +3,64 @@ package mount // import "github.com/docker/docker/pkg/mount" import ( "sort" "strings" - "syscall" "github.com/sirupsen/logrus" ) -// GetMounts retrieves a list of mounts for the current running process. -func GetMounts() ([]*Info, error) { - return parseMountTable() +// FilterFunc is a type defining a callback function +// to filter out unwanted entries. It takes a pointer +// to an Info struct (not fully populated, currently +// only Mountpoint is filled in), and returns two booleans: +// - skip: true if the entry should be skipped +// - stop: true if parsing should be stopped after the entry +type FilterFunc func(*Info) (skip, stop bool) + +// PrefixFilter discards all entries whose mount points +// do not start with a prefix specified +func PrefixFilter(prefix string) FilterFunc { + return func(m *Info) (bool, bool) { + skip := !strings.HasPrefix(m.Mountpoint, prefix) + return skip, false + } +} + +// SingleEntryFilter looks for a specific entry +func SingleEntryFilter(mp string) FilterFunc { + return func(m *Info) (bool, bool) { + if m.Mountpoint == mp { + return false, true // don't skip, stop now + } + return true, false // skip, keep going + } +} + +// ParentsFilter returns all entries whose mount points +// can be parents of a path specified, discarding others. +// For example, given `/var/lib/docker/something`, entries +// like `/var/lib/docker`, `/var` and `/` are returned. +func ParentsFilter(path string) FilterFunc { + return func(m *Info) (bool, bool) { + skip := !strings.HasPrefix(path, m.Mountpoint) + return skip, false + } +} + +// GetMounts retrieves a list of mounts for the current running process, +// with an optional filter applied (use nil for no filter). +func GetMounts(f FilterFunc) ([]*Info, error) { + return parseMountTable(f) } // Mounted determines if a specified mountpoint has been mounted. // On Linux it looks at /proc/self/mountinfo. func Mounted(mountpoint string) (bool, error) { - entries, err := parseMountTable() + entries, err := GetMounts(SingleEntryFilter(mountpoint)) if err != nil { return false, err } - // Search the table for the mountpoint - for _, e := range entries { - if e.Mountpoint == mountpoint { - return true, nil - } - } - return false, nil + return len(entries) > 0, nil } // Mount will mount filesystem according to the specified configuration, on the @@ -57,16 +89,18 @@ func ForceMount(device, target, mType, options string) error { // Unmount lazily unmounts a filesystem on supported platforms, otherwise // does a normal unmount. func Unmount(target string) error { - if mounted, err := Mounted(target); err != nil || !mounted { - return err + err := unmount(target, mntDetach) + if err == syscall.EINVAL { + // ignore "not mounted" error + err = nil } - return unmount(target, mntDetach) + return err } // RecursiveUnmount unmounts the target and all mounts underneath, starting with // the deepsest mount first. 
func RecursiveUnmount(target string) error { - mounts, err := GetMounts() + mounts, err := parseMountTable(PrefixFilter(target)) if err != nil { return err } @@ -77,9 +111,6 @@ func RecursiveUnmount(target string) error { }) for i, m := range mounts { - if !strings.HasPrefix(m.Mountpoint, target) { - continue - } logrus.Debugf("Trying to unmount %s", m.Mountpoint) err = unmount(m.Mountpoint, mntDetach) if err != nil { diff --git a/vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go b/vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go deleted file mode 100644 index 84699eee..00000000 --- a/vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go +++ /dev/null @@ -1,170 +0,0 @@ -// +build !windows - -package mount // import "github.com/docker/docker/pkg/mount" - -import ( - "os" - "path" - "testing" -) - -func TestMountOptionsParsing(t *testing.T) { - options := "noatime,ro,size=10k" - - flag, data := parseOptions(options) - - if data != "size=10k" { - t.Fatalf("Expected size=10 got %s", data) - } - - expectedFlag := NOATIME | RDONLY - - if flag != expectedFlag { - t.Fatalf("Expected %d got %d", expectedFlag, flag) - } -} - -func TestMounted(t *testing.T) { - if os.Getuid() != 0 { - t.Skip("root required") - } - - tmp := path.Join(os.TempDir(), "mount-tests") - if err := os.MkdirAll(tmp, 0777); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - var ( - sourceDir = path.Join(tmp, "source") - targetDir = path.Join(tmp, "target") - sourcePath = path.Join(sourceDir, "file.txt") - targetPath = path.Join(targetDir, "file.txt") - ) - - os.Mkdir(sourceDir, 0777) - os.Mkdir(targetDir, 0777) - - f, err := os.Create(sourcePath) - if err != nil { - t.Fatal(err) - } - f.WriteString("hello") - f.Close() - - f, err = os.Create(targetPath) - if err != nil { - t.Fatal(err) - } - f.Close() - - if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - mounted, err := Mounted(targetDir) - if err != nil { - t.Fatal(err) - } - if !mounted { - t.Fatalf("Expected %s to be mounted", targetDir) - } - if _, err := os.Stat(targetDir); err != nil { - t.Fatal(err) - } -} - -func TestMountReadonly(t *testing.T) { - if os.Getuid() != 0 { - t.Skip("root required") - } - - tmp := path.Join(os.TempDir(), "mount-tests") - if err := os.MkdirAll(tmp, 0777); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - var ( - sourceDir = path.Join(tmp, "source") - targetDir = path.Join(tmp, "target") - sourcePath = path.Join(sourceDir, "file.txt") - targetPath = path.Join(targetDir, "file.txt") - ) - - os.Mkdir(sourceDir, 0777) - os.Mkdir(targetDir, 0777) - - f, err := os.Create(sourcePath) - if err != nil { - t.Fatal(err) - } - f.WriteString("hello") - f.Close() - - f, err = os.Create(targetPath) - if err != nil { - t.Fatal(err) - } - f.Close() - - if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - f, err = os.OpenFile(targetPath, os.O_RDWR, 0777) - if err == nil { - t.Fatal("Should not be able to open a ro file as rw") - } -} - -func TestGetMounts(t *testing.T) { - mounts, err := GetMounts() - if err != nil { - t.Fatal(err) - } - - root := false - for _, entry := range mounts { - if entry.Mountpoint == "/" { - root = true - } - } - - if !root { - t.Fatal("/ should be mounted at least") - } -} - -func TestMergeTmpfsOptions(t *testing.T) { - 
options := []string{"noatime", "ro", "size=10k", "defaults", "atime", "defaults", "rw", "rprivate", "size=1024k", "slave"} - expected := []string{"atime", "rw", "size=1024k", "slave"} - merged, err := MergeTmpfsOptions(options) - if err != nil { - t.Fatal(err) - } - if len(expected) != len(merged) { - t.Fatalf("Expected %s got %s", expected, merged) - } - for index := range merged { - if merged[index] != expected[index] { - t.Fatalf("Expected %s for the %dth option, got %s", expected, index, merged) - } - } - - options = []string{"noatime", "ro", "size=10k", "atime", "rw", "rprivate", "size=1024k", "slave", "size"} - _, err = MergeTmpfsOptions(options) - if err == nil { - t.Fatal("Expected error got nil") - } -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_linux_test.go b/vendor/github.com/docker/docker/pkg/mount/mounter_linux_test.go deleted file mode 100644 index e5da438e..00000000 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_linux_test.go +++ /dev/null @@ -1,234 +0,0 @@ -// +build linux - -package mount // import "github.com/docker/docker/pkg/mount" - -import ( - "fmt" - "io/ioutil" - "os" - "strings" - "testing" - - selinux "github.com/opencontainers/selinux/go-selinux" -) - -func TestMount(t *testing.T) { - if os.Getuid() != 0 { - t.Skip("root required") - } - - source, err := ioutil.TempDir("", "mount-test-source-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(source) - - // Ensure we have a known start point by mounting tmpfs with given options - if err := Mount("tmpfs", source, "tmpfs", "private"); err != nil { - t.Fatal(err) - } - defer ensureUnmount(t, source) - validateMount(t, source, "", "", "") - if t.Failed() { - t.FailNow() - } - - target, err := ioutil.TempDir("", "mount-test-target-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(target) - - tests := []struct { - source string - ftype string - options string - expectedOpts string - expectedOptional string - expectedVFS string - }{ - // No options - {"tmpfs", "tmpfs", "", "", "", ""}, - // Default rw / ro test - {source, "", "bind", "", "", ""}, - {source, "", "bind,private", "", "", ""}, - {source, "", "bind,shared", "", "shared", ""}, - {source, "", "bind,slave", "", "master", ""}, - {source, "", "bind,unbindable", "", "unbindable", ""}, - // Read Write tests - {source, "", "bind,rw", "rw", "", ""}, - {source, "", "bind,rw,private", "rw", "", ""}, - {source, "", "bind,rw,shared", "rw", "shared", ""}, - {source, "", "bind,rw,slave", "rw", "master", ""}, - {source, "", "bind,rw,unbindable", "rw", "unbindable", ""}, - // Read Only tests - {source, "", "bind,ro", "ro", "", ""}, - {source, "", "bind,ro,private", "ro", "", ""}, - {source, "", "bind,ro,shared", "ro", "shared", ""}, - {source, "", "bind,ro,slave", "ro", "master", ""}, - {source, "", "bind,ro,unbindable", "ro", "unbindable", ""}, - // Remount tests to change per filesystem options - {"", "", "remount,size=128k", "rw", "", "rw,size=128k"}, - {"", "", "remount,ro,size=128k", "ro", "", "ro,size=128k"}, - } - - for _, tc := range tests { - ftype, options := tc.ftype, tc.options - if tc.ftype == "" { - ftype = "none" - } - if tc.options == "" { - options = "none" - } - - t.Run(fmt.Sprintf("%v-%v", ftype, options), func(t *testing.T) { - if strings.Contains(tc.options, "slave") { - // Slave requires a shared source - if err := MakeShared(source); err != nil { - t.Fatal(err) - } - defer func() { - if err := MakePrivate(source); err != nil { - t.Fatal(err) - } - }() - } - if strings.Contains(tc.options, "remount") { - 
// create a new mount to remount first - if err := Mount("tmpfs", target, "tmpfs", ""); err != nil { - t.Fatal(err) - } - } - if err := Mount(tc.source, target, tc.ftype, tc.options); err != nil { - t.Fatal(err) - } - defer ensureUnmount(t, target) - expectedVFS := tc.expectedVFS - if selinux.GetEnabled() && expectedVFS != "" { - expectedVFS = expectedVFS + ",seclabel" - } - validateMount(t, target, tc.expectedOpts, tc.expectedOptional, expectedVFS) - }) - } -} - -// ensureUnmount umounts mnt checking for errors -func ensureUnmount(t *testing.T, mnt string) { - if err := Unmount(mnt); err != nil { - t.Error(err) - } -} - -// validateMount checks that mnt has the given options -func validateMount(t *testing.T, mnt string, opts, optional, vfs string) { - info, err := GetMounts() - if err != nil { - t.Fatal(err) - } - - wantedOpts := make(map[string]struct{}) - if opts != "" { - for _, opt := range strings.Split(opts, ",") { - wantedOpts[opt] = struct{}{} - } - } - - wantedOptional := make(map[string]struct{}) - if optional != "" { - for _, opt := range strings.Split(optional, ",") { - wantedOptional[opt] = struct{}{} - } - } - - wantedVFS := make(map[string]struct{}) - if vfs != "" { - for _, opt := range strings.Split(vfs, ",") { - wantedVFS[opt] = struct{}{} - } - } - - mnts := make(map[int]*Info, len(info)) - for _, mi := range info { - mnts[mi.ID] = mi - } - - for _, mi := range info { - if mi.Mountpoint != mnt { - continue - } - - // Use parent info as the defaults - p := mnts[mi.Parent] - pOpts := make(map[string]struct{}) - if p.Opts != "" { - for _, opt := range strings.Split(p.Opts, ",") { - pOpts[clean(opt)] = struct{}{} - } - } - pOptional := make(map[string]struct{}) - if p.Optional != "" { - for _, field := range strings.Split(p.Optional, ",") { - pOptional[clean(field)] = struct{}{} - } - } - - // Validate Opts - if mi.Opts != "" { - for _, opt := range strings.Split(mi.Opts, ",") { - opt = clean(opt) - if !has(wantedOpts, opt) && !has(pOpts, opt) { - t.Errorf("unexpected mount option %q expected %q", opt, opts) - } - delete(wantedOpts, opt) - } - } - for opt := range wantedOpts { - t.Errorf("missing mount option %q found %q", opt, mi.Opts) - } - - // Validate Optional - if mi.Optional != "" { - for _, field := range strings.Split(mi.Optional, ",") { - field = clean(field) - if !has(wantedOptional, field) && !has(pOptional, field) { - t.Errorf("unexpected optional failed %q expected %q", field, optional) - } - delete(wantedOptional, field) - } - } - for field := range wantedOptional { - t.Errorf("missing optional field %q found %q", field, mi.Optional) - } - - // Validate VFS if set - if vfs != "" { - if mi.VfsOpts != "" { - for _, opt := range strings.Split(mi.VfsOpts, ",") { - opt = clean(opt) - if !has(wantedVFS, opt) { - t.Errorf("unexpected mount option %q expected %q", opt, vfs) - } - delete(wantedVFS, opt) - } - } - for opt := range wantedVFS { - t.Errorf("missing mount option %q found %q", opt, mi.VfsOpts) - } - } - - return - } - - t.Errorf("failed to find mount %q", mnt) -} - -// clean strips off any value param after the colon -func clean(v string) string { - return strings.SplitN(v, ":", 2)[0] -} - -// has returns true if key is a member of m -func has(m map[string]struct{}, key string) bool { - _, ok := m[key] - return ok -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go index b86f4a97..36c89dc1 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go 
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go @@ -15,7 +15,7 @@ import ( // Parse /proc/self/mountinfo because comparing Dev and ino does not work from // bind mounts. -func parseMountTable() ([]*Info, error) { +func parseMountTable(filter FilterFunc) ([]*Info, error) { var rawEntries *C.struct_statfs count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) @@ -32,10 +32,24 @@ func parseMountTable() ([]*Info, error) { var out []*Info for _, entry := range entries { var mountinfo Info + var skip, stop bool mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) + + if filter != nil { + // filter out entries we're not interested in + skip, stop = filter(p) + if skip { + continue + } + } + mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) + out = append(out, &mountinfo) + if stop { + break + } } return out, nil } diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go index 4afa8d53..c1dba01f 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go @@ -5,80 +5,119 @@ import ( "fmt" "io" "os" + "strconv" "strings" ) -const ( - /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue - (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) +func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) { + s := bufio.NewScanner(r) + out := []*Info{} + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + /* + 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) - (1) mount ID: unique identifier of the mount (may be reused after umount) - (2) parent ID: ID of parent (or of self for the top of the mount tree) - (3) major:minor: value of st_dev for files on filesystem - (4) root: root of the mount within the filesystem - (5) mount point: mount point relative to the process's root - (6) mount options: per mount options - (7) optional fields: zero or more fields of the form "tag[:value]" - (8) separator: marks the end of the optional fields - (9) filesystem type: name of filesystem of the form "type[.subtype]" - (10) mount source: filesystem specific information or "none" - (11) super options: per super block options*/ - mountinfoFormat = "%d %d %d:%d %s %s %s %s" -) + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options + */ -// Parse /proc/self/mountinfo because comparing Dev and ino does not work from -// bind mounts -func parseMountTable() ([]*Info, error) { - f, err := os.Open("/proc/self/mountinfo") - if err != nil { - return nil, err - } - defer f.Close() + text := s.Text() + fields := strings.Split(text, " ") + numFields := len(fields) + if numFields < 10 { + // should be at least 10 fields + return nil, fmt.Errorf("Parsing '%s' failed: not enough 
fields (%d)", text, numFields) + } - return parseInfoFile(f) -} + p := &Info{} + // ignore any numbers parsing errors, as there should not be any + p.ID, _ = strconv.Atoi(fields[0]) + p.Parent, _ = strconv.Atoi(fields[1]) + mm := strings.Split(fields[2], ":") + if len(mm) != 2 { + return nil, fmt.Errorf("Parsing '%s' failed: unexpected minor:major pair %s", text, mm) + } + p.Major, _ = strconv.Atoi(mm[0]) + p.Minor, _ = strconv.Atoi(mm[1]) -func parseInfoFile(r io.Reader) ([]*Info, error) { - var ( - s = bufio.NewScanner(r) - out = []*Info{} - ) + p.Root = fields[3] + p.Mountpoint = fields[4] + p.Opts = fields[5] - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err + var skip, stop bool + if filter != nil { + // filter out entries we're not interested in + skip, stop = filter(p) + if skip { + continue + } } - var ( - p = &Info{} - text = s.Text() - optionalFields string - ) - - if _, err := fmt.Sscanf(text, mountinfoFormat, - &p.ID, &p.Parent, &p.Major, &p.Minor, - &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { - return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + // one or more optional fields, when a separator (-) + i := 6 + for ; i < numFields && fields[i] != "-"; i++ { + switch i { + case 6: + p.Optional = fields[6] + default: + /* NOTE there might be more optional fields before the such as + fields[7]...fields[N] (where N < sepIndex), although + as of Linux kernel 4.15 the only known ones are + mount propagation flags in fields[6]. The correct + behavior is to ignore any unknown optional fields. + */ + break + } } - // Safe as mountinfo encodes mountpoints with spaces as \040. - index := strings.Index(text, " - ") - postSeparatorFields := strings.Fields(text[index+3:]) - if len(postSeparatorFields) < 3 { - return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + if i == numFields { + return nil, fmt.Errorf("Parsing '%s' failed: missing separator ('-')", text) } - if optionalFields != "-" { - p.Optional = optionalFields + // There should be 3 fields after the separator... + if i+4 > numFields { + return nil, fmt.Errorf("Parsing '%s' failed: not enough fields after a separator", text) } + // ... but in Linux <= 3.9 mounting a cifs with spaces in a share name + // (like "//serv/My Documents") _may_ end up having a space in the last field + // of mountinfo (like "unc=//serv/My Documents"). Since kernel 3.10-rc1, cifs + // option unc= is ignored, so a space should not appear. In here we ignore + // those "extra" fields caused by extra spaces. + p.Fstype = fields[i+1] + p.Source = fields[i+2] + p.VfsOpts = fields[i+3] - p.Fstype = postSeparatorFields[0] - p.Source = postSeparatorFields[1] - p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") out = append(out, p) + if stop { + break + } } return out, nil } +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts +func parseMountTable(filter FilterFunc) ([]*Info, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f, filter) +} + // PidMountInfo collects the mounts for a specific process ID. If the process // ID is unknown, it is better to use `GetMounts` which will inspect // "/proc/self/mountinfo" instead. 
@@ -89,5 +128,5 @@ func PidMountInfo(pid int) ([]*Info, error) { } defer f.Close() - return parseInfoFile(f) + return parseInfoFile(f, nil) } diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go deleted file mode 100644 index 771b3538..00000000 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go +++ /dev/null @@ -1,476 +0,0 @@ -// +build linux - -package mount // import "github.com/docker/docker/pkg/mount" - -import ( - "bytes" - "testing" -) - -const ( - fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw - 16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel - 17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755 - 18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw - 19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw - 20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel - 21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000 - 22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755 - 23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755 - 24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd - 25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw - 26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children - 27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children - 28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children - 29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children - 30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children - 31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children - 32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children - 33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children - 34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children - 35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered - 36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct - 37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel - 38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel - 39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel - 40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw - 41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw - 42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw - 43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw - 45 35 8:17 
/ /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered - 46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered - 47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered - 48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered - 121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 - 124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw - 165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered - 167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered - 171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered - 175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered - 179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered - 183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered - 187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered - 191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered - 195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered - 199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered - 203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b 
rw,seclabel,discard,stripe=16,data=ordered - 207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered - 211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered - 215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered - 219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered - 223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered - 227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered - 231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered - 235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered - 239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered - 243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered - 247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered - 31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1` - - ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw -16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw -17 
20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755 -18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 -19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755 -20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered -21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755 -22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw -23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw -24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw -25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k -26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children -27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw -28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu -29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 -30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw -31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct -32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory -33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices -34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer -35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio -36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event -37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb -38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd -39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525 -40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525 -41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525 -42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525 -43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525 -44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525 -45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525 -46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525 -47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525 -48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525 -49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525 -50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525 -51 20 0:44 
/ /var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525 -52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525 -53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525 -54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525 -55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525 -56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525 -57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525 -58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525 -59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525 -60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525 -61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525 -62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525 -63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525 -64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525 -65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525 -66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525 -67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525 -68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525 -69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525 -70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525 -71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525 -72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525 -73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525 -74 20 0:67 / 
/var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525 -75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525 -76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525 -77 20 0:70 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525 -78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525 -79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525 -80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525 -81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525 -82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525 -83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525 -84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525 -85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525 -86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525 -87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525 -88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525 -89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525 -90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525 -91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525 -92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525 -93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525 -94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525 -95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525 -96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525 -97 20 0:90 / 
/var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525 -98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525 -99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525 -100 20 0:93 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525 -101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525 -102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525 -103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525 -104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525 -105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525 -106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525 -107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525 -108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525 -109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525 -110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525 -111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525 -112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525 -113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525 -114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525 -115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525 -116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525 -117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525 -118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525 -119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525 -120 20 0:113 / 
/var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525 -121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525 -122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525 -123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525 -124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525 -125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525 -126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525 -127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525 -128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525 -129 20 0:122 / /var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525 -130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525 -131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525 -132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525 -133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525 -134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525 -135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525 -136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525 -137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525 -138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525 -139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525 -140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525 -141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525 -142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525 -143 20 0:140 / 
/var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525 -144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525` - - gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered -16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw -17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755 -18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755 -19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw -20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 -21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw -22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw -23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw -24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755 -25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc -26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children -27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children -28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children -29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children -30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children -31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children -32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children -33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro -34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota -35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw -36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw -42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw -43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw -44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000 -68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c -86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered -87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered -88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - 
ext4 /dev/sda6 rw,data=ordered -89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered -38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c -39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c -40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c -41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c -45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c -46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c -47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c -48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c -49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c -50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c -51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c -52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c -53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c -54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c -55 15 0:3398 / /var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c -56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c -57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c -59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c -60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c -61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c -62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c -63 15 0:3406 / 
/var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c -64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c -65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c -66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c -70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c -71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c -72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c -73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c -76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c -77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c -78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c -79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c -80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c -81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c -82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c -83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c -84 15 0:3426 / /var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c -94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c -95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c -96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c -97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c -98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c -102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c -103 15 0:3436 / 
/var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c -104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c -105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c -106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c -107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c -108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c -109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c -110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c -111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c -112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c -113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c -114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c -117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c -118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c -119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c -120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c -121 15 0:3454 / /var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c -122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c -123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c -126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c -127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c -128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c -130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c -131 15 0:3464 / 
/var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c -132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c -133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c -134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c -135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c -136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c -137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c -138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c -139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c -140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c -141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c -142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c -143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c -144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c -147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c -150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c -151 15 0:3484 / /var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c -152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c -153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c -154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c -155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c -156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c -157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c -158 15 0:3491 / 
/var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c -159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c -160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c -162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c -163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c -164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c -165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c -166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c -167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c -168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c -169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c -170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c -171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c -172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c -173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c -174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c -184 15 0:3508 / /var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c -187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c -188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c -189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c -190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c -191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c -192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c -193 15 0:3515 / 
/var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c -194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c -195 15 0:3517 / /var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c -196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c -197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c -198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c -199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c -200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c -201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c -202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c -203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c -204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c -205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c -206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c -207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c -208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c -209 15 0:3531 / /var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c -210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c -211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c -212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c -213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c -214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c -215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c -216 15 0:3538 / 
/var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c -217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c -218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c -219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c -220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c -221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c -222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c -223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c -224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c -225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c -226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c -227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c -228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c -229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c -230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c -231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c -232 15 0:3554 / /var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c -233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c -234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c -235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c -237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c -238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c -239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c -240 15 0:3562 / 
/var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c -241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c -242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c -243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c -244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c -245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c -246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c -247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c -249 15 0:3571 / /var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c -250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c -251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c -252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c -253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c -254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c -255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c -256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c -257 15 0:3579 / /var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c -259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c -260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c -261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c -262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c -263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c -264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c -58 15 0:3587 / 
/var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c -67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c -265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c -270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c -273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c -278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c -281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c -286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c -289 15 0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c -99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096` -) - -func TestParseFedoraMountinfo(t *testing.T) { - r := bytes.NewBuffer([]byte(fedoraMountinfo)) - _, err := parseInfoFile(r) - if err != nil { - t.Fatal(err) - } -} - -func TestParseUbuntuMountinfo(t *testing.T) { - r := bytes.NewBuffer([]byte(ubuntuMountInfo)) - _, err := parseInfoFile(r) - if err != nil { - t.Fatal(err) - } -} - -func TestParseGentooMountinfo(t *testing.T) { - r := bytes.NewBuffer([]byte(gentooMountinfo)) - _, err := parseInfoFile(r) - if err != nil { - t.Fatal(err) - } -} - -func TestParseFedoraMountinfoFields(t *testing.T) { - r := bytes.NewBuffer([]byte(fedoraMountinfo)) - infos, err := parseInfoFile(r) - if err != nil { - t.Fatal(err) - } - expectedLength := 58 - if len(infos) != expectedLength { - t.Fatalf("Expected %d entries, got %d", expectedLength, len(infos)) - } - mi := Info{ - ID: 15, - Parent: 35, - Major: 0, - Minor: 3, - Root: "/", - Mountpoint: "/proc", - Opts: "rw,nosuid,nodev,noexec,relatime", - Optional: "shared:5", - Fstype: "proc", - Source: "proc", - VfsOpts: "rw", - } - - if *infos[0] != mi { - t.Fatalf("expected %#v, got %#v", mi, infos[0]) - } -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go index 2ecc8bae..fd16d3ed 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go @@ -7,6 +7,6 @@ import ( "runtime" ) -func parseMountTable() ([]*Info, error) { +func parseMountTable(f FilterFunc) ([]*Info, error) { return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) } diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go index 7ecba7c1..27e0f697 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go @@ -1,6 +1,6 @@ package mount // import 
"github.com/docker/docker/pkg/mount" -func parseMountTable() ([]*Info, error) { +func parseMountTable(f FilterFunc) ([]*Info, error) { // Do NOT return an error! return nil, nil } diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go deleted file mode 100644 index 01951449..00000000 --- a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go +++ /dev/null @@ -1,348 +0,0 @@ -// +build linux - -package mount // import "github.com/docker/docker/pkg/mount" - -import ( - "os" - "path" - "testing" - - "golang.org/x/sys/unix" -) - -// nothing is propagated in or out -func TestSubtreePrivate(t *testing.T) { - if os.Getuid() != 0 { - t.Skip("root required") - } - - tmp := path.Join(os.TempDir(), "mount-tests") - if err := os.MkdirAll(tmp, 0777); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - var ( - sourceDir = path.Join(tmp, "source") - targetDir = path.Join(tmp, "target") - outside1Dir = path.Join(tmp, "outside1") - outside2Dir = path.Join(tmp, "outside2") - - outside1Path = path.Join(outside1Dir, "file.txt") - outside2Path = path.Join(outside2Dir, "file.txt") - outside1CheckPath = path.Join(targetDir, "a", "file.txt") - outside2CheckPath = path.Join(sourceDir, "b", "file.txt") - ) - if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { - t.Fatal(err) - } - if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(targetDir, 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(outside1Dir, 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(outside2Dir, 0777); err != nil { - t.Fatal(err) - } - - if err := createFile(outside1Path); err != nil { - t.Fatal(err) - } - if err := createFile(outside2Path); err != nil { - t.Fatal(err) - } - - // mount the shared directory to a target - if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - // next, make the target private - if err := MakePrivate(targetDir); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - // mount in an outside path to a mounted path inside the _source_ - if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(path.Join(sourceDir, "a")); err != nil { - t.Fatal(err) - } - }() - - // check that this file _does_not_ show in the _target_ - if _, err := os.Stat(outside1CheckPath); err != nil && !os.IsNotExist(err) { - t.Fatal(err) - } else if err == nil { - t.Fatalf("%q should not be visible, but is", outside1CheckPath) - } - - // next mount outside2Dir into the _target_ - if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(path.Join(targetDir, "b")); err != nil { - t.Fatal(err) - } - }() - - // check that this file _does_not_ show in the _source_ - if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { - t.Fatal(err) - } else if err == nil { - t.Fatalf("%q should not be visible, but is", outside2CheckPath) - } -} - -// Testing that when a target is a shared mount, -// then child mounts propagate to the source -func TestSubtreeShared(t *testing.T) { - if os.Getuid() != 0 { - t.Skip("root required") - } - - tmp := path.Join(os.TempDir(), 
"mount-tests") - if err := os.MkdirAll(tmp, 0777); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - var ( - sourceDir = path.Join(tmp, "source") - targetDir = path.Join(tmp, "target") - outsideDir = path.Join(tmp, "outside") - - outsidePath = path.Join(outsideDir, "file.txt") - sourceCheckPath = path.Join(sourceDir, "a", "file.txt") - ) - - if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(targetDir, 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(outsideDir, 0777); err != nil { - t.Fatal(err) - } - - if err := createFile(outsidePath); err != nil { - t.Fatal(err) - } - - // mount the source as shared - if err := MakeShared(sourceDir); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(sourceDir); err != nil { - t.Fatal(err) - } - }() - - // mount the shared directory to a target - if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - // mount in an outside path to a mounted path inside the target - if err := Mount(outsideDir, path.Join(targetDir, "a"), "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(path.Join(targetDir, "a")); err != nil { - t.Fatal(err) - } - }() - - // NOW, check that the file from the outside directory is available in the source directory - if _, err := os.Stat(sourceCheckPath); err != nil { - t.Fatal(err) - } -} - -// testing that mounts to a shared source show up in the slave target, -// and that mounts into a slave target do _not_ show up in the shared source -func TestSubtreeSharedSlave(t *testing.T) { - if os.Getuid() != 0 { - t.Skip("root required") - } - - tmp := path.Join(os.TempDir(), "mount-tests") - if err := os.MkdirAll(tmp, 0777); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - var ( - sourceDir = path.Join(tmp, "source") - targetDir = path.Join(tmp, "target") - outside1Dir = path.Join(tmp, "outside1") - outside2Dir = path.Join(tmp, "outside2") - - outside1Path = path.Join(outside1Dir, "file.txt") - outside2Path = path.Join(outside2Dir, "file.txt") - outside1CheckPath = path.Join(targetDir, "a", "file.txt") - outside2CheckPath = path.Join(sourceDir, "b", "file.txt") - ) - if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { - t.Fatal(err) - } - if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(targetDir, 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(outside1Dir, 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(outside2Dir, 0777); err != nil { - t.Fatal(err) - } - - if err := createFile(outside1Path); err != nil { - t.Fatal(err) - } - if err := createFile(outside2Path); err != nil { - t.Fatal(err) - } - - // mount the source as shared - if err := MakeShared(sourceDir); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(sourceDir); err != nil { - t.Fatal(err) - } - }() - - // mount the shared directory to a target - if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - // next, make the target slave - if err := MakeSlave(targetDir); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - // mount in an outside path to a mounted path inside the _source_ - if err := 
Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(path.Join(sourceDir, "a")); err != nil { - t.Fatal(err) - } - }() - - // check that this file _does_ show in the _target_ - if _, err := os.Stat(outside1CheckPath); err != nil { - t.Fatal(err) - } - - // next mount outside2Dir into the _target_ - if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(path.Join(targetDir, "b")); err != nil { - t.Fatal(err) - } - }() - - // check that this file _does_not_ show in the _source_ - if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { - t.Fatal(err) - } else if err == nil { - t.Fatalf("%q should not be visible, but is", outside2CheckPath) - } -} - -func TestSubtreeUnbindable(t *testing.T) { - if os.Getuid() != 0 { - t.Skip("root required") - } - - tmp := path.Join(os.TempDir(), "mount-tests") - if err := os.MkdirAll(tmp, 0777); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - var ( - sourceDir = path.Join(tmp, "source") - targetDir = path.Join(tmp, "target") - ) - if err := os.MkdirAll(sourceDir, 0777); err != nil { - t.Fatal(err) - } - if err := os.MkdirAll(targetDir, 0777); err != nil { - t.Fatal(err) - } - - // next, make the source unbindable - if err := MakeUnbindable(sourceDir); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(sourceDir); err != nil { - t.Fatal(err) - } - }() - - // then attempt to mount it to target. It should fail - if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != unix.EINVAL { - t.Fatal(err) - } else if err == nil { - t.Fatalf("%q should not have been bindable", sourceDir) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() -} - -func createFile(path string) error { - f, err := os.Create(path) - if err != nil { - return err - } - f.WriteString("hello world!") - return f.Close() -} diff --git a/vendor/github.com/docker/docker/pkg/pools/BUILD.bazel b/vendor/github.com/docker/docker/pkg/pools/BUILD.bazel deleted file mode 100644 index e35b8257..00000000 --- a/vendor/github.com/docker/docker/pkg/pools/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = ["pools.go"], - importpath = "github.com/docker/docker/pkg/pools", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/docker/docker/pkg/ioutils:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = ["pools_test.go"], - embed = [":go_default_library"], - importpath = "github.com/docker/docker/pkg/pools", - deps = [ - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert/cmp:go_default_library", - ], -) diff --git a/vendor/github.com/docker/docker/pkg/pools/pools_test.go b/vendor/github.com/docker/docker/pkg/pools/pools_test.go deleted file mode 100644 index 76015169..00000000 --- a/vendor/github.com/docker/docker/pkg/pools/pools_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package pools // import "github.com/docker/docker/pkg/pools" - -import ( - "bufio" - "bytes" - "io" - "strings" - "testing" - - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" -) - -func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) { - reader := 
BufioReader32KPool.Get(nil) - if reader == nil { - t.Fatalf("BufioReaderPool should have create a bufio.Reader but did not.") - } -} - -func TestBufioReaderPoolPutAndGet(t *testing.T) { - sr := bufio.NewReader(strings.NewReader("foobar")) - reader := BufioReader32KPool.Get(sr) - if reader == nil { - t.Fatalf("BufioReaderPool should not return a nil reader.") - } - // verify the first 3 byte - buf1 := make([]byte, 3) - _, err := reader.Read(buf1) - if err != nil { - t.Fatal(err) - } - if actual := string(buf1); actual != "foo" { - t.Fatalf("The first letter should have been 'foo' but was %v", actual) - } - BufioReader32KPool.Put(reader) - // Try to read the next 3 bytes - _, err = sr.Read(make([]byte, 3)) - if err == nil || err != io.EOF { - t.Fatalf("The buffer should have been empty, issue an EOF error.") - } -} - -type simpleReaderCloser struct { - io.Reader - closed bool -} - -func (r *simpleReaderCloser) Close() error { - r.closed = true - return nil -} - -func TestNewReadCloserWrapperWithAReadCloser(t *testing.T) { - br := bufio.NewReader(strings.NewReader("")) - sr := &simpleReaderCloser{ - Reader: strings.NewReader("foobar"), - closed: false, - } - reader := BufioReader32KPool.NewReadCloserWrapper(br, sr) - if reader == nil { - t.Fatalf("NewReadCloserWrapper should not return a nil reader.") - } - // Verify the content of reader - buf := make([]byte, 3) - _, err := reader.Read(buf) - if err != nil { - t.Fatal(err) - } - if actual := string(buf); actual != "foo" { - t.Fatalf("The first 3 letter should have been 'foo' but were %v", actual) - } - reader.Close() - // Read 3 more bytes "bar" - _, err = reader.Read(buf) - if err != nil { - t.Fatal(err) - } - if actual := string(buf); actual != "bar" { - t.Fatalf("The first 3 letter should have been 'bar' but were %v", actual) - } - if !sr.closed { - t.Fatalf("The ReaderCloser should have been closed, it is not.") - } -} - -func TestBufioWriterPoolGetWithNoReaderShouldCreateOne(t *testing.T) { - writer := BufioWriter32KPool.Get(nil) - if writer == nil { - t.Fatalf("BufioWriterPool should have create a bufio.Writer but did not.") - } -} - -func TestBufioWriterPoolPutAndGet(t *testing.T) { - buf := new(bytes.Buffer) - bw := bufio.NewWriter(buf) - writer := BufioWriter32KPool.Get(bw) - assert.Assert(t, writer != nil) - - written, err := writer.Write([]byte("foobar")) - assert.NilError(t, err) - assert.Check(t, is.Equal(6, written)) - - // Make sure we Flush all the way ? 
- writer.Flush() - bw.Flush() - assert.Check(t, is.Len(buf.Bytes(), 6)) - // Reset the buffer - buf.Reset() - BufioWriter32KPool.Put(writer) - // Try to write something - if _, err = writer.Write([]byte("barfoo")); err != nil { - t.Fatal(err) - } - // If we now try to flush it, it should panic (the writer is nil) - // recover it - defer func() { - if r := recover(); r == nil { - t.Fatal("Trying to flush the writter should have 'paniced', did not.") - } - }() - writer.Flush() -} - -type simpleWriterCloser struct { - io.Writer - closed bool -} - -func (r *simpleWriterCloser) Close() error { - r.closed = true - return nil -} - -func TestNewWriteCloserWrapperWithAWriteCloser(t *testing.T) { - buf := new(bytes.Buffer) - bw := bufio.NewWriter(buf) - sw := &simpleWriterCloser{ - Writer: new(bytes.Buffer), - closed: false, - } - bw.Flush() - writer := BufioWriter32KPool.NewWriteCloserWrapper(bw, sw) - if writer == nil { - t.Fatalf("BufioReaderPool should not return a nil writer.") - } - written, err := writer.Write([]byte("foobar")) - if err != nil { - t.Fatal(err) - } - if written != 6 { - t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) - } - writer.Close() - if !sw.closed { - t.Fatalf("The ReaderCloser should have been closed, it is not.") - } -} - -func TestBufferPoolPutAndGet(t *testing.T) { - buf := buffer32KPool.Get() - buffer32KPool.Put(buf) -} diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/BUILD.bazel b/vendor/github.com/docker/docker/pkg/stdcopy/BUILD.bazel deleted file mode 100644 index fefb4d74..00000000 --- a/vendor/github.com/docker/docker/pkg/stdcopy/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = ["stdcopy.go"], - importpath = "github.com/docker/docker/pkg/stdcopy", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = ["stdcopy_test.go"], - embed = [":go_default_library"], - importpath = "github.com/docker/docker/pkg/stdcopy", -) diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go index 3b1e1873..8f6e0a73 100644 --- a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go +++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go @@ -21,7 +21,7 @@ const ( // Stderr represents standard error steam type. Stderr // Systemerr represents errors originating from the system that make it - // into the the multiplexed stream. + // into the multiplexed stream. 
Systemerr stdWriterPrefixLen = 8 diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go deleted file mode 100644 index 2ec0c320..00000000 --- a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go +++ /dev/null @@ -1,289 +0,0 @@ -package stdcopy // import "github.com/docker/docker/pkg/stdcopy" - -import ( - "bytes" - "errors" - "io" - "io/ioutil" - "strings" - "testing" -) - -func TestNewStdWriter(t *testing.T) { - writer := NewStdWriter(ioutil.Discard, Stdout) - if writer == nil { - t.Fatalf("NewStdWriter with an invalid StdType should not return nil.") - } -} - -func TestWriteWithUninitializedStdWriter(t *testing.T) { - writer := stdWriter{ - Writer: nil, - prefix: byte(Stdout), - } - n, err := writer.Write([]byte("Something here")) - if n != 0 || err == nil { - t.Fatalf("Should fail when given an uncomplete or uninitialized StdWriter") - } -} - -func TestWriteWithNilBytes(t *testing.T) { - writer := NewStdWriter(ioutil.Discard, Stdout) - n, err := writer.Write(nil) - if err != nil { - t.Fatalf("Shouldn't have fail when given no data") - } - if n > 0 { - t.Fatalf("Write should have written 0 byte, but has written %d", n) - } -} - -func TestWrite(t *testing.T) { - writer := NewStdWriter(ioutil.Discard, Stdout) - data := []byte("Test StdWrite.Write") - n, err := writer.Write(data) - if err != nil { - t.Fatalf("Error while writing with StdWrite") - } - if n != len(data) { - t.Fatalf("Write should have written %d byte but wrote %d.", len(data), n) - } -} - -type errWriter struct { - n int - err error -} - -func (f *errWriter) Write(buf []byte) (int, error) { - return f.n, f.err -} - -func TestWriteWithWriterError(t *testing.T) { - expectedError := errors.New("expected") - expectedReturnedBytes := 10 - writer := NewStdWriter(&errWriter{ - n: stdWriterPrefixLen + expectedReturnedBytes, - err: expectedError}, Stdout) - data := []byte("This won't get written, sigh") - n, err := writer.Write(data) - if err != expectedError { - t.Fatalf("Didn't get expected error.") - } - if n != expectedReturnedBytes { - t.Fatalf("Didn't get expected written bytes %d, got %d.", - expectedReturnedBytes, n) - } -} - -func TestWriteDoesNotReturnNegativeWrittenBytes(t *testing.T) { - writer := NewStdWriter(&errWriter{n: -1}, Stdout) - data := []byte("This won't get written, sigh") - actual, _ := writer.Write(data) - if actual != 0 { - t.Fatalf("Expected returned written bytes equal to 0, got %d", actual) - } -} - -func getSrcBuffer(stdOutBytes, stdErrBytes []byte) (buffer *bytes.Buffer, err error) { - buffer = new(bytes.Buffer) - dstOut := NewStdWriter(buffer, Stdout) - _, err = dstOut.Write(stdOutBytes) - if err != nil { - return - } - dstErr := NewStdWriter(buffer, Stderr) - _, err = dstErr.Write(stdErrBytes) - return -} - -func TestStdCopyWriteAndRead(t *testing.T) { - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - written, err := StdCopy(ioutil.Discard, ioutil.Discard, buffer) - if err != nil { - t.Fatal(err) - } - expectedTotalWritten := len(stdOutBytes) + len(stdErrBytes) - if written != int64(expectedTotalWritten) { - t.Fatalf("Expected to have total of %d bytes written, got %d", expectedTotalWritten, written) - } -} - -type customReader struct { - n int - err error - totalCalls int - correctCalls int - src *bytes.Buffer -} - -func (f *customReader) Read(buf 
[]byte) (int, error) { - f.totalCalls++ - if f.totalCalls <= f.correctCalls { - return f.src.Read(buf) - } - return f.n, f.err -} - -func TestStdCopyReturnsErrorReadingHeader(t *testing.T) { - expectedError := errors.New("error") - reader := &customReader{ - err: expectedError} - written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) - if written != 0 { - t.Fatalf("Expected 0 bytes read, got %d", written) - } - if err != expectedError { - t.Fatalf("Didn't get expected error") - } -} - -func TestStdCopyReturnsErrorReadingFrame(t *testing.T) { - expectedError := errors.New("error") - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - reader := &customReader{ - correctCalls: 1, - n: stdWriterPrefixLen + 1, - err: expectedError, - src: buffer} - written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) - if written != 0 { - t.Fatalf("Expected 0 bytes read, got %d", written) - } - if err != expectedError { - t.Fatalf("Didn't get expected error") - } -} - -func TestStdCopyDetectsCorruptedFrame(t *testing.T) { - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - reader := &customReader{ - correctCalls: 1, - n: stdWriterPrefixLen + 1, - err: io.EOF, - src: buffer} - written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) - if written != startingBufLen { - t.Fatalf("Expected %d bytes read, got %d", startingBufLen, written) - } - if err != nil { - t.Fatal("Didn't get nil error") - } -} - -func TestStdCopyWithInvalidInputHeader(t *testing.T) { - dstOut := NewStdWriter(ioutil.Discard, Stdout) - dstErr := NewStdWriter(ioutil.Discard, Stderr) - src := strings.NewReader("Invalid input") - _, err := StdCopy(dstOut, dstErr, src) - if err == nil { - t.Fatal("StdCopy with invalid input header should fail.") - } -} - -func TestStdCopyWithCorruptedPrefix(t *testing.T) { - data := []byte{0x01, 0x02, 0x03} - src := bytes.NewReader(data) - written, err := StdCopy(nil, nil, src) - if err != nil { - t.Fatalf("StdCopy should not return an error with corrupted prefix.") - } - if written != 0 { - t.Fatalf("StdCopy should have written 0, but has written %d", written) - } -} - -func TestStdCopyReturnsWriteErrors(t *testing.T) { - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - expectedError := errors.New("expected") - - dstOut := &errWriter{err: expectedError} - - written, err := StdCopy(dstOut, ioutil.Discard, buffer) - if written != 0 { - t.Fatalf("StdCopy should have written 0, but has written %d", written) - } - if err != expectedError { - t.Fatalf("Didn't get expected error, got %v", err) - } -} - -func TestStdCopyDetectsNotFullyWrittenFrames(t *testing.T) { - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - dstOut := &errWriter{n: startingBufLen - 10} - - written, err := StdCopy(dstOut, ioutil.Discard, buffer) - if written != 0 { - t.Fatalf("StdCopy should have return 0 written bytes, but returned %d", written) - } - if err != io.ErrShortWrite 
{ - t.Fatalf("Didn't get expected io.ErrShortWrite error") - } -} - -// TestStdCopyReturnsErrorFromSystem tests that StdCopy correctly returns an -// error, when that error is muxed into the Systemerr stream. -func TestStdCopyReturnsErrorFromSystem(t *testing.T) { - // write in the basic messages, just so there's some fluff in there - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - // add in an error message on the Systemerr stream - systemErrBytes := []byte(strings.Repeat("S", startingBufLen)) - systemWriter := NewStdWriter(buffer, Systemerr) - _, err = systemWriter.Write(systemErrBytes) - if err != nil { - t.Fatal(err) - } - - // now copy and demux. we should expect an error containing the string we - // wrote out - _, err = StdCopy(ioutil.Discard, ioutil.Discard, buffer) - if err == nil { - t.Fatal("expected error, got none") - } - if !strings.Contains(err.Error(), string(systemErrBytes)) { - t.Fatal("expected error to contain message") - } -} - -func BenchmarkWrite(b *testing.B) { - w := NewStdWriter(ioutil.Discard, Stdout) - data := []byte("Test line for testing stdwriter performance\n") - data = bytes.Repeat(data, 100) - b.SetBytes(int64(len(data))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - if _, err := w.Write(data); err != nil { - b.Fatal(err) - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE new file mode 100644 index 00000000..5d80670b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2018 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD new file mode 100644 index 00000000..2ee8768d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD @@ -0,0 +1,27 @@ +Copyright (c) 2014-2018 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/docker/pkg/system/BUILD.bazel b/vendor/github.com/docker/docker/pkg/system/BUILD.bazel deleted file mode 100644 index 99cab481..00000000 --- a/vendor/github.com/docker/docker/pkg/system/BUILD.bazel +++ /dev/null @@ -1,289 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "chtimes.go", - "errors.go", - "exitcode.go", - "init.go", - "lcow.go", - "meminfo.go", - "path.go", - "rm.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "chtimes_unix.go", - "filesys.go", - "init_unix.go", - "lcow_unix.go", - "lstat_unix.go", - "meminfo_unsupported.go", - "mknod.go", - "stat_unix.go", - "umask.go", - "utimes_unsupported.go", - "xattrs_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "chtimes_unix.go", - "filesys.go", - "init_unix.go", - "lcow_unix.go", - "lstat_unix.go", - "meminfo_unsupported.go", - "mknod.go", - "process_unix.go", - "stat_darwin.go", - "stat_unix.go", - "umask.go", - "utimes_unsupported.go", - "xattrs_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "chtimes_unix.go", - "filesys.go", - "init_unix.go", - "lcow_unix.go", - "lstat_unix.go", - "meminfo_unsupported.go", - "mknod.go", - "stat_unix.go", - "umask.go", - "utimes_unsupported.go", - "xattrs_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "chtimes_unix.go", - "filesys.go", - "init_unix.go", - "lcow_unix.go", - "lstat_unix.go", - "meminfo_unsupported.go", - "mknod.go", - "process_unix.go", - "stat_freebsd.go", - "stat_unix.go", - "syscall_unix.go", - "umask.go", - "utimes_freebsd.go", - "xattrs_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "chtimes_unix.go", - "filesys.go", - "init_unix.go", - "lcow_unix.go", - "lstat_unix.go", - "meminfo_linux.go", - "mknod.go", - "process_unix.go", - "stat_linux.go", - "stat_unix.go", - "syscall_unix.go", - "umask.go", - "utimes_linux.go", - "xattrs_linux.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "chtimes_unix.go", - "filesys.go", - "init_unix.go", - "lcow_unix.go", - "lstat_unix.go", - "meminfo_unsupported.go", - "mknod.go", - "stat_unix.go", - "umask.go", - "utimes_unsupported.go", - "xattrs_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "chtimes_unix.go", - "filesys.go", - "init_unix.go", - "lcow_unix.go", - "lstat_unix.go", - "meminfo_unsupported.go", - "mknod.go", - "stat_unix.go", - "umask.go", - "utimes_unsupported.go", - "xattrs_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "chtimes_unix.go", - "filesys.go", - "init_unix.go", - "lcow_unix.go", - "lstat_unix.go", - "meminfo_unsupported.go", - "mknod.go", - "stat_openbsd.go", - "stat_unix.go", - "umask.go", - "utimes_unsupported.go", - "xattrs_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "chtimes_unix.go", - "filesys.go", - "init_unix.go", - "lcow_unix.go", - "lstat_unix.go", - "meminfo_unsupported.go", - "mknod.go", 
- "stat_unix.go", - "umask.go", - "utimes_unsupported.go", - "xattrs_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "chtimes_unix.go", - "filesys.go", - "init_unix.go", - "lcow_unix.go", - "lstat_unix.go", - "meminfo_unsupported.go", - "mknod.go", - "stat_solaris.go", - "stat_unix.go", - "umask.go", - "utimes_unsupported.go", - "xattrs_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "chtimes_windows.go", - "filesys_windows.go", - "init_windows.go", - "lcow_windows.go", - "lstat_windows.go", - "meminfo_windows.go", - "mknod_windows.go", - "process_windows.go", - "stat_windows.go", - "syscall_windows.go", - "umask_windows.go", - "utimes_unsupported.go", - "xattrs_unsupported.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/docker/docker/pkg/system", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/containerd/continuity/pathdriver:go_default_library", - "//vendor/github.com/docker/docker/pkg/mount:go_default_library", - "//vendor/github.com/opencontainers/image-spec/specs-go/v1:go_default_library", - "//vendor/github.com/pkg/errors:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/docker/go-units:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/Microsoft/go-winio:go_default_library", - "//vendor/github.com/sirupsen/logrus:go_default_library", - "//vendor/golang.org/x/sys/windows:go_default_library", - ], - "//conditions:default": [], - }), -) - -go_test( - name = "go_default_test", - srcs = [ - "chtimes_test.go", - "rm_test.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "chtimes_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "chtimes_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "chtimes_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "chtimes_unix_test.go", - "lstat_unix_test.go", - "meminfo_unix_test.go", - "stat_unix_test.go", - "utimes_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "chtimes_unix_test.go", - "lstat_unix_test.go", - "meminfo_unix_test.go", - "stat_unix_test.go", - "utimes_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "chtimes_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "chtimes_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "chtimes_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - 
"chtimes_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "chtimes_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "chtimes_windows_test.go", - "path_windows_test.go", - "syscall_windows_test.go", - ], - "//conditions:default": [], - }), - embed = [":go_default_library"], - importpath = "github.com/docker/docker/pkg/system", - deps = [ - "//vendor/github.com/docker/docker/pkg/mount:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/github.com/docker/go-units:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/docker/go-units:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/containerd/continuity/pathdriver:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_test.go deleted file mode 100644 index 5a3f98e1..00000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - "time" -) - -// prepareTempFile creates a temporary file in a temporary directory. -func prepareTempFile(t *testing.T) (string, string) { - dir, err := ioutil.TempDir("", "docker-system-test") - if err != nil { - t.Fatal(err) - } - - file := filepath.Join(dir, "exist") - if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { - t.Fatal(err) - } - return file, dir -} - -// TestChtimes tests Chtimes on a tempfile. 
Test only mTime, because aTime is OS dependent -func TestChtimes(t *testing.T) { - file, dir := prepareTempFile(t) - defer os.RemoveAll(dir) - - beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) - unixEpochTime := time.Unix(0, 0) - afterUnixEpochTime := time.Unix(100, 0) - unixMaxTime := maxTime - - // Test both aTime and mTime set to Unix Epoch - Chtimes(file, unixEpochTime, unixEpochTime) - - f, err := os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) - } - - // Test aTime before Unix Epoch and mTime set to Unix Epoch - Chtimes(file, beforeUnixEpochTime, unixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) - } - - // Test aTime set to Unix Epoch and mTime before Unix Epoch - Chtimes(file, unixEpochTime, beforeUnixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) - } - - // Test both aTime and mTime set to after Unix Epoch (valid time) - Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != afterUnixEpochTime { - t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, f.ModTime()) - } - - // Test both aTime and mTime set to Unix max time - Chtimes(file, unixMaxTime, unixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime().Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { - t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), f.ModTime().Truncate(time.Second)) - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go deleted file mode 100644 index e25232c7..00000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go +++ /dev/null @@ -1,91 +0,0 @@ -// +build !windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "syscall" - "testing" - "time" -) - -// TestChtimesLinux tests Chtimes access time on a tempfile on Linux -func TestChtimesLinux(t *testing.T) { - file, dir := prepareTempFile(t) - defer os.RemoveAll(dir) - - beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) - unixEpochTime := time.Unix(0, 0) - afterUnixEpochTime := time.Unix(100, 0) - unixMaxTime := maxTime - - // Test both aTime and mTime set to Unix Epoch - Chtimes(file, unixEpochTime, unixEpochTime) - - f, err := os.Stat(file) - if err != nil { - t.Fatal(err) - } - - stat := f.Sys().(*syscall.Stat_t) - aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test aTime before Unix Epoch and mTime set to Unix Epoch - Chtimes(file, beforeUnixEpochTime, unixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - stat = f.Sys().(*syscall.Stat_t) - aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test aTime set to Unix Epoch and mTime before Unix Epoch - Chtimes(file, unixEpochTime, beforeUnixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - stat = f.Sys().(*syscall.Stat_t) - aTime = 
time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test both aTime and mTime set to after Unix Epoch (valid time) - Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - stat = f.Sys().(*syscall.Stat_t) - aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - if aTime != afterUnixEpochTime { - t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) - } - - // Test both aTime and mTime set to Unix max time - Chtimes(file, unixMaxTime, unixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - stat = f.Sys().(*syscall.Stat_t) - aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - if aTime.Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { - t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), aTime.Truncate(time.Second)) - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go deleted file mode 100644 index d91e4bc6..00000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// +build windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "syscall" - "testing" - "time" -) - -// TestChtimesWindows tests Chtimes access time on a tempfile on Windows -func TestChtimesWindows(t *testing.T) { - file, dir := prepareTempFile(t) - defer os.RemoveAll(dir) - - beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) - unixEpochTime := time.Unix(0, 0) - afterUnixEpochTime := time.Unix(100, 0) - unixMaxTime := maxTime - - // Test both aTime and mTime set to Unix Epoch - Chtimes(file, unixEpochTime, unixEpochTime) - - f, err := os.Stat(file) - if err != nil { - t.Fatal(err) - } - - aTime := time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test aTime before Unix Epoch and mTime set to Unix Epoch - Chtimes(file, beforeUnixEpochTime, unixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test aTime set to Unix Epoch and mTime before Unix Epoch - Chtimes(file, unixEpochTime, beforeUnixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) - if aTime != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) - } - - // Test both aTime and mTime set to after Unix Epoch (valid time) - Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) - if aTime != afterUnixEpochTime { - t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) - } - - // Test both aTime and mTime set to Unix max time - Chtimes(file, unixMaxTime, unixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) - if aTime.Truncate(time.Second) != 
unixMaxTime.Truncate(time.Second) { - t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), aTime.Truncate(time.Second)) - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/lcow.go b/vendor/github.com/docker/docker/pkg/system/lcow.go index 5c3fbfe6..5be3e218 100644 --- a/vendor/github.com/docker/docker/pkg/system/lcow.go +++ b/vendor/github.com/docker/docker/pkg/system/lcow.go @@ -1,69 +1,32 @@ package system // import "github.com/docker/docker/pkg/system" import ( - "fmt" "runtime" "strings" specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" ) -// ValidatePlatform determines if a platform structure is valid. -// TODO This is a temporary function - can be replaced by parsing from -// https://github.com/containerd/containerd/pull/1403/files at a later date. -// @jhowardmsft -func ValidatePlatform(platform *specs.Platform) error { - platform.Architecture = strings.ToLower(platform.Architecture) - platform.OS = strings.ToLower(platform.OS) - // Based on https://github.com/moby/moby/pull/34642#issuecomment-330375350, do - // not support anything except operating system. - if platform.Architecture != "" { - return fmt.Errorf("invalid platform architecture %q", platform.Architecture) - } - if platform.OS != "" { - if !(platform.OS == runtime.GOOS || (LCOWSupported() && platform.OS == "linux")) { - return fmt.Errorf("invalid platform os %q", platform.OS) - } - } - if len(platform.OSFeatures) != 0 { - return fmt.Errorf("invalid platform osfeatures %q", platform.OSFeatures) - } - if platform.OSVersion != "" { - return fmt.Errorf("invalid platform osversion %q", platform.OSVersion) - } - if platform.Variant != "" { - return fmt.Errorf("invalid platform variant %q", platform.Variant) - } - return nil -} - -// ParsePlatform parses a platform string in the format os[/arch[/variant] -// into an OCI image-spec platform structure. -// TODO This is a temporary function - can be replaced by parsing from -// https://github.com/containerd/containerd/pull/1403/files at a later date. -// @jhowardmsft -func ParsePlatform(in string) *specs.Platform { - p := &specs.Platform{} - elements := strings.SplitN(strings.ToLower(in), "/", 3) - if len(elements) == 3 { - p.Variant = elements[2] - } - if len(elements) >= 2 { - p.Architecture = elements[1] - } - if len(elements) >= 1 { - p.OS = elements[0] - } - return p -} - // IsOSSupported determines if an operating system is supported by the host func IsOSSupported(os string) bool { - if runtime.GOOS == os { + if strings.EqualFold(runtime.GOOS, os) { return true } - if LCOWSupported() && os == "linux" { + if LCOWSupported() && strings.EqualFold(os, "linux") { return true } return false } + +// ValidatePlatform determines if a platform structure is valid. 
+// TODO This is a temporary windows-only function, should be replaced by +// comparison of worker capabilities +func ValidatePlatform(platform specs.Platform) error { + if runtime.GOOS == "windows" { + if !(platform.OS == runtime.GOOS || (LCOWSupported() && platform.OS == "linux")) { + return errors.Errorf("unsupported os %s", platform.OS) + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go deleted file mode 100644 index 9fb4a191..00000000 --- a/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build linux freebsd - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "testing" -) - -// TestLstat tests Lstat for existing and non existing files -func TestLstat(t *testing.T) { - file, invalid, _, dir := prepareFiles(t) - defer os.RemoveAll(dir) - - statFile, err := Lstat(file) - if err != nil { - t.Fatal(err) - } - if statFile == nil { - t.Fatal("returned empty stat for existing file") - } - - statInvalid, err := Lstat(invalid) - if err == nil { - t.Fatal("did not return error for non-existing file") - } - if statInvalid != nil { - t.Fatal("returned non-nil stat for non-existing file") - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go deleted file mode 100644 index c3690d63..00000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build linux freebsd - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "strings" - "testing" - - "github.com/docker/go-units" -) - -// TestMemInfo tests parseMemInfo with a static meminfo string -func TestMemInfo(t *testing.T) { - const input = ` - MemTotal: 1 kB - MemFree: 2 kB - SwapTotal: 3 kB - SwapFree: 4 kB - Malformed1: - Malformed2: 1 - Malformed3: 2 MB - Malformed4: X kB - ` - meminfo, err := parseMemInfo(strings.NewReader(input)) - if err != nil { - t.Fatal(err) - } - if meminfo.MemTotal != 1*units.KiB { - t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal) - } - if meminfo.MemFree != 2*units.KiB { - t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree) - } - if meminfo.SwapTotal != 3*units.KiB { - t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal) - } - if meminfo.SwapFree != 4*units.KiB { - t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree) - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go new file mode 100644 index 00000000..b0b93196 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/path_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +// GetLongPathName converts Windows short pathnames to full pathnames. +// For example C:\Users\ADMIN~1 --> C:\Users\Administrator. +// It is a no-op on non-Windows platforms +func GetLongPathName(path string) (string, error) { + return path, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go new file mode 100644 index 00000000..188f2c29 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/path_windows.go @@ -0,0 +1,24 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// GetLongPathName converts Windows short pathnames to full pathnames. 
+// For example C:\Users\ADMIN~1 --> C:\Users\Administrator. +// It is a no-op on non-Windows platforms +func GetLongPathName(path string) (string, error) { + // See https://groups.google.com/forum/#!topic/golang-dev/1tufzkruoTg + p := syscall.StringToUTF16(path) + b := p // GetLongPathName says we can reuse buffer + n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + _, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + } + return syscall.UTF16ToString(b), nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows_test.go b/vendor/github.com/docker/docker/pkg/system/path_windows_test.go deleted file mode 100644 index 974707eb..00000000 --- a/vendor/github.com/docker/docker/pkg/system/path_windows_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// +build windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "testing" - - "github.com/containerd/continuity/pathdriver" -) - -// TestCheckSystemDriveAndRemoveDriveLetter tests CheckSystemDriveAndRemoveDriveLetter -func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { - // Fails if not C drive. - _, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`, pathdriver.LocalPathDriver) - if err == nil || (err != nil && err.Error() != "The specified path is not on the system drive (C:)") { - t.Fatalf("Expected error for d:") - } - - // Single character is unchanged - var path string - if path, err = CheckSystemDriveAndRemoveDriveLetter("z", pathdriver.LocalPathDriver); err != nil { - t.Fatalf("Single character should pass") - } - if path != "z" { - t.Fatalf("Single character should be unchanged") - } - - // Two characters without colon is unchanged - if path, err = CheckSystemDriveAndRemoveDriveLetter("AB", pathdriver.LocalPathDriver); err != nil { - t.Fatalf("2 characters without colon should pass") - } - if path != "AB" { - t.Fatalf("2 characters without colon should be unchanged") - } - - // Abs path without drive letter - if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`, pathdriver.LocalPathDriver); err != nil { - t.Fatalf("abs path no drive letter should pass") - } - if path != `\l` { - t.Fatalf("abs path without drive letter should be unchanged") - } - - // Abs path without drive letter, linux style - if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`, pathdriver.LocalPathDriver); err != nil { - t.Fatalf("abs path no drive letter linux style should pass") - } - if path != `\l` { - t.Fatalf("abs path without drive letter linux failed %s", path) - } - - // Drive-colon should be stripped - if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`, pathdriver.LocalPathDriver); err != nil { - t.Fatalf("An absolute path should pass") - } - if path != `\` { - t.Fatalf(`An absolute path should have been shortened to \ %s`, path) - } - - // Verify with a linux-style path - if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`, pathdriver.LocalPathDriver); err != nil { - t.Fatalf("An absolute path should pass") - } - if path != `\` { - t.Fatalf(`A linux style absolute path should have been shortened to \ %s`, path) - } - - // Failure on c: - if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`, pathdriver.LocalPathDriver); err == nil { - t.Fatalf("c: should fail") - } - if err.Error() != `No relative path specified in "c:"` { - t.Fatalf(path, err) - } - - // Failure on d: - if path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`, 
pathdriver.LocalPathDriver); err == nil { - t.Fatalf("c: should fail") - } - if err.Error() != `No relative path specified in "d:"` { - t.Fatalf(path, err) - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/rm.go b/vendor/github.com/docker/docker/pkg/system/rm.go index 02e4d262..b3109918 100644 --- a/vendor/github.com/docker/docker/pkg/system/rm.go +++ b/vendor/github.com/docker/docker/pkg/system/rm.go @@ -34,7 +34,7 @@ func EnsureRemoveAll(dir string) error { for { err := os.RemoveAll(dir) if err == nil { - return err + return nil } pe, ok := err.(*os.PathError) diff --git a/vendor/github.com/docker/docker/pkg/system/rm_test.go b/vendor/github.com/docker/docker/pkg/system/rm_test.go deleted file mode 100644 index 3b12272d..00000000 --- a/vendor/github.com/docker/docker/pkg/system/rm_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "io/ioutil" - "os" - "path/filepath" - "runtime" - "testing" - "time" - - "github.com/docker/docker/pkg/mount" -) - -func TestEnsureRemoveAllNotExist(t *testing.T) { - // should never return an error for a non-existent path - if err := EnsureRemoveAll("/non/existent/path"); err != nil { - t.Fatal(err) - } -} - -func TestEnsureRemoveAllWithDir(t *testing.T) { - dir, err := ioutil.TempDir("", "test-ensure-removeall-with-dir") - if err != nil { - t.Fatal(err) - } - if err := EnsureRemoveAll(dir); err != nil { - t.Fatal(err) - } -} - -func TestEnsureRemoveAllWithFile(t *testing.T) { - tmp, err := ioutil.TempFile("", "test-ensure-removeall-with-dir") - if err != nil { - t.Fatal(err) - } - tmp.Close() - if err := EnsureRemoveAll(tmp.Name()); err != nil { - t.Fatal(err) - } -} - -func TestEnsureRemoveAllWithMount(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("mount not supported on Windows") - } - - dir1, err := ioutil.TempDir("", "test-ensure-removeall-with-dir1") - if err != nil { - t.Fatal(err) - } - dir2, err := ioutil.TempDir("", "test-ensure-removeall-with-dir2") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir2) - - bindDir := filepath.Join(dir1, "bind") - if err := os.MkdirAll(bindDir, 0755); err != nil { - t.Fatal(err) - } - - if err := mount.Mount(dir2, bindDir, "none", "bind"); err != nil { - t.Fatal(err) - } - - done := make(chan struct{}) - go func() { - err = EnsureRemoveAll(dir1) - close(done) - }() - - select { - case <-done: - if err != nil { - t.Fatal(err) - } - case <-time.After(5 * time.Second): - t.Fatal("timeout waiting for EnsureRemoveAll to finish") - } - - if _, err := os.Stat(dir1); !os.IsNotExist(err) { - t.Fatalf("expected %q to not exist", dir1) - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go b/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go deleted file mode 100644 index fd68a966..00000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build linux freebsd - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "syscall" - "testing" - - "github.com/gotestyourself/gotestyourself/assert" -) - -// TestFromStatT tests fromStatT for a tempfile -func TestFromStatT(t *testing.T) { - file, _, _, dir := prepareFiles(t) - defer os.RemoveAll(dir) - - stat := &syscall.Stat_t{} - err := syscall.Lstat(file, stat) - assert.NilError(t, err) - - s, err := fromStatT(stat) - assert.NilError(t, err) - - if stat.Mode != s.Mode() { - t.Fatal("got invalid mode") - } - if stat.Uid != s.UID() { - t.Fatal("got invalid uid") - } - if 
stat.Gid != s.GID() { - t.Fatal("got invalid gid") - } - if stat.Rdev != s.Rdev() { - t.Fatal("got invalid rdev") - } - if stat.Mtim != s.Mtim() { - t.Fatal("got invalid mtim") - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go index ee7e0256..4ae92fa6 100644 --- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go @@ -2,16 +2,62 @@ package system // import "github.com/docker/docker/pkg/system" import ( "fmt" + "syscall" "unsafe" "github.com/sirupsen/logrus" "golang.org/x/sys/windows" ) +const ( + OWNER_SECURITY_INFORMATION = 0x00000001 + GROUP_SECURITY_INFORMATION = 0x00000002 + DACL_SECURITY_INFORMATION = 0x00000004 + SACL_SECURITY_INFORMATION = 0x00000008 + LABEL_SECURITY_INFORMATION = 0x00000010 + ATTRIBUTE_SECURITY_INFORMATION = 0x00000020 + SCOPE_SECURITY_INFORMATION = 0x00000040 + PROCESS_TRUST_LABEL_SECURITY_INFORMATION = 0x00000080 + ACCESS_FILTER_SECURITY_INFORMATION = 0x00000100 + BACKUP_SECURITY_INFORMATION = 0x00010000 + PROTECTED_DACL_SECURITY_INFORMATION = 0x80000000 + PROTECTED_SACL_SECURITY_INFORMATION = 0x40000000 + UNPROTECTED_DACL_SECURITY_INFORMATION = 0x20000000 + UNPROTECTED_SACL_SECURITY_INFORMATION = 0x10000000 +) + +const ( + SE_UNKNOWN_OBJECT_TYPE = iota + SE_FILE_OBJECT + SE_SERVICE + SE_PRINTER + SE_REGISTRY_KEY + SE_LMSHARE + SE_KERNEL_OBJECT + SE_WINDOW_OBJECT + SE_DS_OBJECT + SE_DS_OBJECT_ALL + SE_PROVIDER_DEFINED_OBJECT + SE_WMIGUID_OBJECT + SE_REGISTRY_WOW64_32KEY +) + +const ( + SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" +) + +const ( + ContainerAdministratorSidString = "S-1-5-93-2-1" + ContainerUserSidString = "S-1-5-93-2-2" +) + var ( - ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") - procGetVersionExW = modkernel32.NewProc("GetVersionExW") - procGetProductInfo = modkernel32.NewProc("GetProductInfo") + ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") + procGetProductInfo = modkernel32.NewProc("GetProductInfo") + procSetNamedSecurityInfo = modadvapi32.NewProc("SetNamedSecurityInfoW") + procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") ) // OSVersion is a wrapper for Windows version information @@ -125,3 +171,23 @@ func HasWin32KSupport() bool { // APIs. 
return ntuserApiset.Load() == nil } + +func SetNamedSecurityInfo(objectName *uint16, objectType uint32, securityInformation uint32, sidOwner *windows.SID, sidGroup *windows.SID, dacl *byte, sacl *byte) (result error) { + r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfo.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(sidOwner)), uintptr(unsafe.Pointer(sidGroup)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + if r0 != 0 { + result = syscall.Errno(r0) + } + return +} + +func GetSecurityDescriptorDacl(securityDescriptor *byte, daclPresent *uint32, dacl **byte, daclDefaulted *uint32) (result error) { + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(securityDescriptor)), uintptr(unsafe.Pointer(daclPresent)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclDefaulted)), 0, 0) + if r1 == 0 { + if e1 != 0 { + result = syscall.Errno(e1) + } else { + result = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows_test.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows_test.go deleted file mode 100644 index 8e78ba62..00000000 --- a/vendor/github.com/docker/docker/pkg/system/syscall_windows_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "testing" - -func TestHasWin32KSupport(t *testing.T) { - s := HasWin32KSupport() // make sure this doesn't panic - - t.Logf("win32k: %v", s) // will be different on different platforms -- informative only -} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go b/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go deleted file mode 100644 index cc0e7cbf..00000000 --- a/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go +++ /dev/null @@ -1,68 +0,0 @@ -// +build linux freebsd - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "io/ioutil" - "os" - "path/filepath" - "syscall" - "testing" -) - -// prepareFiles creates files for testing in the temp directory -func prepareFiles(t *testing.T) (string, string, string, string) { - dir, err := ioutil.TempDir("", "docker-system-test") - if err != nil { - t.Fatal(err) - } - - file := filepath.Join(dir, "exist") - if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { - t.Fatal(err) - } - - invalid := filepath.Join(dir, "doesnt-exist") - - symlink := filepath.Join(dir, "symlink") - if err := os.Symlink(file, symlink); err != nil { - t.Fatal(err) - } - - return file, invalid, symlink, dir -} - -func TestLUtimesNano(t *testing.T) { - file, invalid, symlink, dir := prepareFiles(t) - defer os.RemoveAll(dir) - - before, err := os.Stat(file) - if err != nil { - t.Fatal(err) - } - - ts := []syscall.Timespec{{Sec: 0, Nsec: 0}, {Sec: 0, Nsec: 0}} - if err := LUtimesNano(symlink, ts); err != nil { - t.Fatal(err) - } - - symlinkInfo, err := os.Lstat(symlink) - if err != nil { - t.Fatal(err) - } - if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() { - t.Fatal("The modification time of the symlink should be different") - } - - fileInfo, err := os.Stat(file) - if err != nil { - t.Fatal(err) - } - if before.ModTime().Unix() != fileInfo.ModTime().Unix() { - t.Fatal("The modification time of the file should be same") - } - - if err := LUtimesNano(invalid, ts); err == nil { - t.Fatal("Doesn't return an error on a non-existing file") - } -} diff --git 
a/vendor/github.com/docker/docker/pkg/term/BUILD.bazel b/vendor/github.com/docker/docker/pkg/term/BUILD.bazel deleted file mode 100644 index 8e253f8c..00000000 --- a/vendor/github.com/docker/docker/pkg/term/BUILD.bazel +++ /dev/null @@ -1,131 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "ascii.go", - "proxy.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "tc.go", - "term.go", - "winsize.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "tc.go", - "term.go", - "termios_bsd.go", - "winsize.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "tc.go", - "term.go", - "winsize.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "tc.go", - "term.go", - "termios_bsd.go", - "winsize.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "tc.go", - "term.go", - "termios_linux.go", - "winsize.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "tc.go", - "term.go", - "winsize.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "tc.go", - "term.go", - "winsize.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "tc.go", - "term.go", - "termios_bsd.go", - "winsize.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "tc.go", - "term.go", - "winsize.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "tc.go", - "term.go", - "winsize.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "term_windows.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/docker/docker/pkg/term", - visibility = ["//visibility:public"], - deps = select({ - "@io_bazel_rules_go//go/platform:android": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/Azure/go-ansiterm/winterm:go_default_library", - "//vendor/github.com/docker/docker/pkg/term/windows:go_default_library", - ], - "//conditions:default": [], - }), -) - -go_test( - name = "go_default_test", - srcs = [ - "ascii_test.go", - "proxy_test.go", - ] + select({ - "@io_bazel_rules_go//go/platform:linux": [ - "term_linux_test.go", - ], - "//conditions:default": [], - }), - embed = [":go_default_library"], - importpath = "github.com/docker/docker/pkg/term", - deps = [ - "//vendor/github.com/gotestyourself/gotestyourself/assert:go_default_library", - "//vendor/github.com/gotestyourself/gotestyourself/assert/cmp:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/google/go-cmp/cmp:go_default_library", - ], - 
"//conditions:default": [], - }), -) diff --git a/vendor/github.com/docker/docker/pkg/term/ascii.go b/vendor/github.com/docker/docker/pkg/term/ascii.go deleted file mode 100644 index 87bca8d4..00000000 --- a/vendor/github.com/docker/docker/pkg/term/ascii.go +++ /dev/null @@ -1,66 +0,0 @@ -package term // import "github.com/docker/docker/pkg/term" - -import ( - "fmt" - "strings" -) - -// ASCII list the possible supported ASCII key sequence -var ASCII = []string{ - "ctrl-@", - "ctrl-a", - "ctrl-b", - "ctrl-c", - "ctrl-d", - "ctrl-e", - "ctrl-f", - "ctrl-g", - "ctrl-h", - "ctrl-i", - "ctrl-j", - "ctrl-k", - "ctrl-l", - "ctrl-m", - "ctrl-n", - "ctrl-o", - "ctrl-p", - "ctrl-q", - "ctrl-r", - "ctrl-s", - "ctrl-t", - "ctrl-u", - "ctrl-v", - "ctrl-w", - "ctrl-x", - "ctrl-y", - "ctrl-z", - "ctrl-[", - "ctrl-\\", - "ctrl-]", - "ctrl-^", - "ctrl-_", -} - -// ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code. -func ToBytes(keys string) ([]byte, error) { - codes := []byte{} -next: - for _, key := range strings.Split(keys, ",") { - if len(key) != 1 { - for code, ctrl := range ASCII { - if ctrl == key { - codes = append(codes, byte(code)) - continue next - } - } - if key == "DEL" { - codes = append(codes, 127) - } else { - return nil, fmt.Errorf("Unknown character: '%s'", key) - } - } else { - codes = append(codes, key[0]) - } - } - return codes, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/ascii_test.go b/vendor/github.com/docker/docker/pkg/term/ascii_test.go deleted file mode 100644 index 321d1b87..00000000 --- a/vendor/github.com/docker/docker/pkg/term/ascii_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package term // import "github.com/docker/docker/pkg/term" - -import ( - "testing" - - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" -) - -func TestToBytes(t *testing.T) { - codes, err := ToBytes("ctrl-a,a") - assert.NilError(t, err) - assert.Check(t, is.DeepEqual([]byte{1, 97}, codes)) - - _, err = ToBytes("shift-z") - assert.Check(t, is.ErrorContains(err, "")) - - codes, err = ToBytes("ctrl-@,ctrl-[,~,ctrl-o") - assert.NilError(t, err) - assert.Check(t, is.DeepEqual([]byte{0, 27, 126, 15}, codes)) - - codes, err = ToBytes("DEL,+") - assert.NilError(t, err) - assert.Check(t, is.DeepEqual([]byte{127, 43}, codes)) -} diff --git a/vendor/github.com/docker/docker/pkg/term/proxy.go b/vendor/github.com/docker/docker/pkg/term/proxy.go deleted file mode 100644 index 5c4a5260..00000000 --- a/vendor/github.com/docker/docker/pkg/term/proxy.go +++ /dev/null @@ -1,74 +0,0 @@ -package term // import "github.com/docker/docker/pkg/term" - -import ( - "io" -) - -// EscapeError is special error which returned by a TTY proxy reader's Read() -// method in case its detach escape sequence is read. -type EscapeError struct{} - -func (EscapeError) Error() string { - return "read escape sequence" -} - -// escapeProxy is used only for attaches with a TTY. It is used to proxy -// stdin keypresses from the underlying reader and look for the passed in -// escape key sequence to signal a detach. -type escapeProxy struct { - escapeKeys []byte - escapeKeyPos int - r io.Reader -} - -// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader -// and detects when the specified escape keys are read, in which case the Read -// method will return an error of type EscapeError. 
-func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader { - return &escapeProxy{ - escapeKeys: escapeKeys, - r: r, - } -} - -func (r *escapeProxy) Read(buf []byte) (int, error) { - nr, err := r.r.Read(buf) - - preserve := func() { - // this preserves the original key presses in the passed in buffer - nr += r.escapeKeyPos - preserve := make([]byte, 0, r.escapeKeyPos+len(buf)) - preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...) - preserve = append(preserve, buf...) - r.escapeKeyPos = 0 - copy(buf[0:nr], preserve) - } - - if nr != 1 || err != nil { - if r.escapeKeyPos > 0 { - preserve() - } - return nr, err - } - - if buf[0] != r.escapeKeys[r.escapeKeyPos] { - if r.escapeKeyPos > 0 { - preserve() - } - return nr, nil - } - - if r.escapeKeyPos == len(r.escapeKeys)-1 { - return 0, EscapeError{} - } - - // Looks like we've got an escape key, but we need to match again on the next - // read. - // Store the current escape key we found so we can look for the next one on - // the next read. - // Since this is an escape key, make sure we don't let the caller read it - // If later on we find that this is not the escape sequence, we'll add the - // keys back - r.escapeKeyPos++ - return nr - r.escapeKeyPos, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/proxy_test.go b/vendor/github.com/docker/docker/pkg/term/proxy_test.go deleted file mode 100644 index 759be514..00000000 --- a/vendor/github.com/docker/docker/pkg/term/proxy_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package term // import "github.com/docker/docker/pkg/term" - -import ( - "bytes" - "fmt" - "testing" - - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" -) - -func TestEscapeProxyRead(t *testing.T) { - escapeKeys, _ := ToBytes("DEL") - keys, _ := ToBytes("a,b,c,+") - reader := NewEscapeProxy(bytes.NewReader(keys), escapeKeys) - buf := make([]byte, len(keys)) - nr, err := reader.Read(buf) - assert.NilError(t, err) - assert.Equal(t, nr, len(keys), fmt.Sprintf("nr %d should be equal to the number of %d", nr, len(keys))) - assert.DeepEqual(t, keys, buf) - - keys, _ = ToBytes("") - reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) - buf = make([]byte, len(keys)) - nr, err = reader.Read(buf) - assert.Assert(t, is.ErrorContains(err, ""), "Should throw error when no keys are to read") - assert.Equal(t, nr, 0, "nr should be zero") - assert.Check(t, is.Len(keys, 0)) - assert.Check(t, is.Len(buf, 0)) - - escapeKeys, _ = ToBytes("ctrl-x,ctrl-@") - keys, _ = ToBytes("DEL") - reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) - buf = make([]byte, len(keys)) - nr, err = reader.Read(buf) - assert.NilError(t, err) - assert.Equal(t, nr, 1, fmt.Sprintf("nr %d should be equal to the number of 1", nr)) - assert.DeepEqual(t, keys, buf) - - escapeKeys, _ = ToBytes("ctrl-c") - keys, _ = ToBytes("ctrl-c") - reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) - buf = make([]byte, len(keys)) - nr, err = reader.Read(buf) - assert.Error(t, err, "read escape sequence") - assert.Equal(t, nr, 0, "nr should be equal to 0") - assert.DeepEqual(t, keys, buf) - - escapeKeys, _ = ToBytes("ctrl-c,ctrl-z") - keys, _ = ToBytes("ctrl-c,ctrl-z") - reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) - buf = make([]byte, 1) - nr, err = reader.Read(buf) - assert.NilError(t, err) - assert.Equal(t, nr, 0, "nr should be equal to 0") - assert.DeepEqual(t, keys[0:1], buf) - nr, err = reader.Read(buf) - assert.Error(t, err, "read escape sequence") - assert.Equal(t, nr, 0, 
"nr should be equal to 0") - assert.DeepEqual(t, keys[1:], buf) - - escapeKeys, _ = ToBytes("ctrl-c,ctrl-z") - keys, _ = ToBytes("ctrl-c,DEL,+") - reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) - buf = make([]byte, 1) - nr, err = reader.Read(buf) - assert.NilError(t, err) - assert.Equal(t, nr, 0, "nr should be equal to 0") - assert.DeepEqual(t, keys[0:1], buf) - buf = make([]byte, len(keys)) - nr, err = reader.Read(buf) - assert.NilError(t, err) - assert.Equal(t, nr, len(keys), fmt.Sprintf("nr should be equal to %d", len(keys))) - assert.DeepEqual(t, keys, buf) - - escapeKeys, _ = ToBytes("ctrl-c,ctrl-z") - keys, _ = ToBytes("ctrl-c,DEL") - reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) - buf = make([]byte, 1) - nr, err = reader.Read(buf) - assert.NilError(t, err) - assert.Equal(t, nr, 0, "nr should be equal to 0") - assert.DeepEqual(t, keys[0:1], buf) - buf = make([]byte, len(keys)) - nr, err = reader.Read(buf) - assert.NilError(t, err) - assert.Equal(t, nr, len(keys), fmt.Sprintf("nr should be equal to %d", len(keys))) - assert.DeepEqual(t, keys, buf) -} diff --git a/vendor/github.com/docker/docker/pkg/term/tc.go b/vendor/github.com/docker/docker/pkg/term/tc.go deleted file mode 100644 index 01bcaa8a..00000000 --- a/vendor/github.com/docker/docker/pkg/term/tc.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !windows - -package term // import "github.com/docker/docker/pkg/term" - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/unix" -) - -func tcget(fd uintptr, p *Termios) syscall.Errno { - _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) - return err -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) - return err -} diff --git a/vendor/github.com/docker/docker/pkg/term/term.go b/vendor/github.com/docker/docker/pkg/term/term.go deleted file mode 100644 index 0589a955..00000000 --- a/vendor/github.com/docker/docker/pkg/term/term.go +++ /dev/null @@ -1,124 +0,0 @@ -// +build !windows - -// Package term provides structures and helper functions to work with -// terminal (state, sizes). -package term // import "github.com/docker/docker/pkg/term" - -import ( - "errors" - "fmt" - "io" - "os" - "os/signal" - - "golang.org/x/sys/unix" -) - -var ( - // ErrInvalidState is returned if the state of the terminal is invalid. - ErrInvalidState = errors.New("Invalid terminal state") -) - -// State represents the state of the terminal. -type State struct { - termios Termios -} - -// Winsize represents the size of the terminal window. -type Winsize struct { - Height uint16 - Width uint16 - x uint16 - y uint16 -} - -// StdStreams returns the standard streams (stdin, stdout, stderr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - return os.Stdin, os.Stdout, os.Stderr -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - var inFd uintptr - var isTerminalIn bool - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminalIn = IsTerminal(inFd) - } - return inFd, isTerminalIn -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - var termios Termios - return tcget(fd, &termios) == 0 -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. 
-func RestoreTerminal(fd uintptr, state *State) error { - if state == nil { - return ErrInvalidState - } - if err := tcset(fd, &state.termios); err != 0 { - return err - } - return nil -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - return &oldState, nil -} - -// DisableEcho applies the specified state to the terminal connected to the file -// descriptor, with echo disabled. -func DisableEcho(fd uintptr, state *State) error { - newState := state.termios - newState.Lflag &^= unix.ECHO - - if err := tcset(fd, &newState); err != 0 { - return err - } - handleInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this puts both the input -// and output into raw mode. On Windows, it only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (*State, error) { - oldState, err := MakeRaw(fd) - if err != nil { - return nil, err - } - handleInterrupt(fd, oldState) - return oldState, err -} - -// SetRawTerminalOutput puts the output of terminal connected to the given file -// descriptor into raw mode. On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation. -func SetRawTerminalOutput(fd uintptr) (*State, error) { - return nil, nil -} - -func handleInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - go func() { - for range sigchan { - // quit cleanly and the new terminal item is on a new line - fmt.Println() - signal.Stop(sigchan) - close(sigchan) - RestoreTerminal(fd, state) - os.Exit(1) - } - }() -} diff --git a/vendor/github.com/docker/docker/pkg/term/term_linux_test.go b/vendor/github.com/docker/docker/pkg/term/term_linux_test.go deleted file mode 100644 index 4f1d6758..00000000 --- a/vendor/github.com/docker/docker/pkg/term/term_linux_test.go +++ /dev/null @@ -1,117 +0,0 @@ -//+build linux - -package term // import "github.com/docker/docker/pkg/term" - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/gotestyourself/gotestyourself/assert" -) - -// RequiresRoot skips tests that require root, unless the test.root flag has -// been set -func RequiresRoot(t *testing.T) { - if os.Getuid() != 0 { - t.Skip("skipping test that requires root") - return - } -} - -func newTtyForTest(t *testing.T) (*os.File, error) { - RequiresRoot(t) - return os.OpenFile("/dev/tty", os.O_RDWR, os.ModeDevice) -} - -func newTempFile() (*os.File, error) { - return ioutil.TempFile(os.TempDir(), "temp") -} - -func TestGetWinsize(t *testing.T) { - tty, err := newTtyForTest(t) - defer tty.Close() - assert.NilError(t, err) - winSize, err := GetWinsize(tty.Fd()) - assert.NilError(t, err) - assert.Assert(t, winSize != nil) - - newSize := Winsize{Width: 200, Height: 200, x: winSize.x, y: winSize.y} - err = SetWinsize(tty.Fd(), &newSize) - assert.NilError(t, err) - winSize, err = GetWinsize(tty.Fd()) - assert.NilError(t, err) - assert.DeepEqual(t, *winSize, newSize, cmpWinsize) -} - -var cmpWinsize = cmp.AllowUnexported(Winsize{}) - -func TestSetWinsize(t *testing.T) { - tty, err := newTtyForTest(t) - defer tty.Close() - assert.NilError(t, err) - winSize, err := GetWinsize(tty.Fd()) - assert.NilError(t, err) - assert.Assert(t, winSize != nil) - newSize := Winsize{Width: 
200, Height: 200, x: winSize.x, y: winSize.y} - err = SetWinsize(tty.Fd(), &newSize) - assert.NilError(t, err) - winSize, err = GetWinsize(tty.Fd()) - assert.NilError(t, err) - assert.DeepEqual(t, *winSize, newSize, cmpWinsize) -} - -func TestGetFdInfo(t *testing.T) { - tty, err := newTtyForTest(t) - defer tty.Close() - assert.NilError(t, err) - inFd, isTerminal := GetFdInfo(tty) - assert.Equal(t, inFd, tty.Fd()) - assert.Equal(t, isTerminal, true) - tmpFile, err := newTempFile() - assert.NilError(t, err) - defer tmpFile.Close() - inFd, isTerminal = GetFdInfo(tmpFile) - assert.Equal(t, inFd, tmpFile.Fd()) - assert.Equal(t, isTerminal, false) -} - -func TestIsTerminal(t *testing.T) { - tty, err := newTtyForTest(t) - defer tty.Close() - assert.NilError(t, err) - isTerminal := IsTerminal(tty.Fd()) - assert.Equal(t, isTerminal, true) - tmpFile, err := newTempFile() - assert.NilError(t, err) - defer tmpFile.Close() - isTerminal = IsTerminal(tmpFile.Fd()) - assert.Equal(t, isTerminal, false) -} - -func TestSaveState(t *testing.T) { - tty, err := newTtyForTest(t) - defer tty.Close() - assert.NilError(t, err) - state, err := SaveState(tty.Fd()) - assert.NilError(t, err) - assert.Assert(t, state != nil) - tty, err = newTtyForTest(t) - assert.NilError(t, err) - defer tty.Close() - err = RestoreTerminal(tty.Fd(), state) - assert.NilError(t, err) -} - -func TestDisableEcho(t *testing.T) { - tty, err := newTtyForTest(t) - defer tty.Close() - assert.NilError(t, err) - state, err := SetRawTerminal(tty.Fd()) - defer RestoreTerminal(tty.Fd(), state) - assert.NilError(t, err) - assert.Assert(t, state != nil) - err = DisableEcho(tty.Fd(), state) - assert.NilError(t, err) -} diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go deleted file mode 100644 index 64ead3c5..00000000 --- a/vendor/github.com/docker/docker/pkg/term/term_windows.go +++ /dev/null @@ -1,228 +0,0 @@ -package term // import "github.com/docker/docker/pkg/term" - -import ( - "io" - "os" - "os/signal" - "syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE - - "github.com/Azure/go-ansiterm/winterm" - "github.com/docker/docker/pkg/term/windows" -) - -// State holds the console mode for the terminal. -type State struct { - mode uint32 -} - -// Winsize is used for window size. -type Winsize struct { - Height uint16 - Width uint16 -} - -// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console -var vtInputSupported bool - -// StdStreams returns the standard streams (stdin, stdout, stderr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - // Turn on VT handling on all std handles, if possible. This might - // fail, in which case we will fall back to terminal emulation. - var emulateStdin, emulateStdout, emulateStderr bool - fd := os.Stdin.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil { - emulateStdin = true - } else { - vtInputSupported = true - } - // Unconditionally set the console mode back even on failure because SetConsoleMode - // remembers invalid bits on input handles. - winterm.SetConsoleMode(fd, mode) - } - - fd = os.Stdout.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. 
- if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil { - emulateStdout = true - } else { - winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING) - } - } - - fd = os.Stderr.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil { - emulateStderr = true - } else { - winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING) - } - } - - if os.Getenv("ConEmuANSI") == "ON" || os.Getenv("ConsoleZVersion") != "" { - // The ConEmu and ConsoleZ terminals emulate ANSI on output streams well. - emulateStdin = true - emulateStdout = false - emulateStderr = false - } - - // Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and - // STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as - // go-ansiterm hasn't switch to x/sys/windows. - // TODO: switch back to x/sys/windows once go-ansiterm has switched - if emulateStdin { - stdIn = windowsconsole.NewAnsiReader(syscall.STD_INPUT_HANDLE) - } else { - stdIn = os.Stdin - } - - if emulateStdout { - stdOut = windowsconsole.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE) - } else { - stdOut = os.Stdout - } - - if emulateStderr { - stdErr = windowsconsole.NewAnsiWriter(syscall.STD_ERROR_HANDLE) - } else { - stdErr = os.Stderr - } - - return -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - return windowsconsole.GetHandleInfo(in) -} - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil, err - } - - winsize := &Winsize{ - Width: uint16(info.Window.Right - info.Window.Left + 1), - Height: uint16(info.Window.Bottom - info.Window.Top + 1), - } - - return winsize, nil -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - return windowsconsole.IsConsole(fd) -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { - return winterm.SetConsoleMode(fd, state.mode) -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - mode, e := winterm.GetConsoleMode(fd) - if e != nil { - return nil, e - } - - return &State{mode: mode}, nil -} - -// DisableEcho disables echo for the terminal connected to the given file descriptor. -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx -func DisableEcho(fd uintptr, state *State) error { - mode := state.mode - mode &^= winterm.ENABLE_ECHO_INPUT - mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT - err := winterm.SetConsoleMode(fd, mode) - if err != nil { - return err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this puts both the input -// and output into raw mode. 
On Windows, it only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (*State, error) { - state, err := MakeRaw(fd) - if err != nil { - return nil, err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return state, err -} - -// SetRawTerminalOutput puts the output of terminal connected to the given file -// descriptor into raw mode. On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation. -func SetRawTerminalOutput(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this - // version of Windows. - winterm.SetConsoleMode(fd, state.mode|winterm.DISABLE_NEWLINE_AUTO_RETURN) - return state, err -} - -// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be restored. -func MakeRaw(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - mode := state.mode - - // See - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - - // Disable these modes - mode &^= winterm.ENABLE_ECHO_INPUT - mode &^= winterm.ENABLE_LINE_INPUT - mode &^= winterm.ENABLE_MOUSE_INPUT - mode &^= winterm.ENABLE_WINDOW_INPUT - mode &^= winterm.ENABLE_PROCESSED_INPUT - - // Enable these modes - mode |= winterm.ENABLE_EXTENDED_FLAGS - mode |= winterm.ENABLE_INSERT_MODE - mode |= winterm.ENABLE_QUICK_EDIT_MODE - if vtInputSupported { - mode |= winterm.ENABLE_VIRTUAL_TERMINAL_INPUT - } - - err = winterm.SetConsoleMode(fd, mode) - if err != nil { - return nil, err - } - return state, nil -} - -func restoreAtInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - - go func() { - _ = <-sigchan - RestoreTerminal(fd, state) - os.Exit(0) - }() -} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_bsd.go b/vendor/github.com/docker/docker/pkg/term/termios_bsd.go deleted file mode 100644 index 48f25ce7..00000000 --- a/vendor/github.com/docker/docker/pkg/term/termios_bsd.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build darwin freebsd openbsd - -package term // import "github.com/docker/docker/pkg/term" - -import ( - "unsafe" - - "golang.org/x/sys/unix" -) - -const ( - getTermios = unix.TIOCGETA - setTermios = unix.TIOCSETA -) - -// Termios is the Unix API for terminal I/O. -type Termios unix.Termios - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) - newState.Oflag &^= unix.OPOST - newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) - newState.Cflag &^= (unix.CSIZE | unix.PARENB) - newState.Cflag |= unix.CS8 - newState.Cc[unix.VMIN] = 1 - newState.Cc[unix.VTIME] = 0 - - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_linux.go b/vendor/github.com/docker/docker/pkg/term/termios_linux.go deleted file mode 100644 index 6d4c63fd..00000000 --- a/vendor/github.com/docker/docker/pkg/term/termios_linux.go +++ /dev/null @@ -1,39 +0,0 @@ -package term // import "github.com/docker/docker/pkg/term" - -import ( - "golang.org/x/sys/unix" -) - -const ( - getTermios = unix.TCGETS - setTermios = unix.TCSETS -) - -// Termios is the Unix API for terminal I/O. -type Termios unix.Termios - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - termios, err := unix.IoctlGetTermios(int(fd), getTermios) - if err != nil { - return nil, err - } - - var oldState State - oldState.termios = Termios(*termios) - - termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) - termios.Oflag &^= unix.OPOST - termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) - termios.Cflag &^= (unix.CSIZE | unix.PARENB) - termios.Cflag |= unix.CS8 - termios.Cc[unix.VMIN] = 1 - termios.Cc[unix.VTIME] = 0 - - if err := unix.IoctlSetTermios(int(fd), setTermios, termios); err != nil { - return nil, err - } - return &oldState, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/BUILD.bazel b/vendor/github.com/docker/docker/pkg/term/windows/BUILD.bazel deleted file mode 100644 index 2284579b..00000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/BUILD.bazel +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "windows.go", - ] + select({ - "@io_bazel_rules_go//go/platform:windows": [ - "ansi_reader.go", - "ansi_writer.go", - "console.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/docker/docker/pkg/term/windows", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/Azure/go-ansiterm:go_default_library", - "//vendor/github.com/sirupsen/logrus:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/Azure/go-ansiterm/winterm:go_default_library", - ], - "//conditions:default": [], - }), -) - -go_test( - name = "go_default_test", - srcs = ["windows_test.go"], - embed = [":go_default_library"], - importpath = "github.com/docker/docker/pkg/term/windows", -) diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go deleted file mode 100644 index 1d7c452c..00000000 --- 
a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go +++ /dev/null @@ -1,263 +0,0 @@ -// +build windows - -package windowsconsole // import "github.com/docker/docker/pkg/term/windows" - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "strings" - "unsafe" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -const ( - escapeSequence = ansiterm.KEY_ESC_CSI -) - -// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. -type ansiReader struct { - file *os.File - fd uintptr - buffer []byte - cbBuffer int - command []byte -} - -// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a -// Windows console input handle. -func NewAnsiReader(nFile int) io.ReadCloser { - initLogger() - file, fd := winterm.GetStdFile(nFile) - return &ansiReader{ - file: file, - fd: fd, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - buffer: make([]byte, 0), - } -} - -// Close closes the wrapped file. -func (ar *ansiReader) Close() (err error) { - return ar.file.Close() -} - -// Fd returns the file descriptor of the wrapped file. -func (ar *ansiReader) Fd() uintptr { - return ar.fd -} - -// Read reads up to len(p) bytes of translated input events into p. -func (ar *ansiReader) Read(p []byte) (int, error) { - if len(p) == 0 { - return 0, nil - } - - // Previously read bytes exist, read as much as we can and return - if len(ar.buffer) > 0 { - logger.Debugf("Reading previously cached bytes") - - originalLength := len(ar.buffer) - copiedLength := copy(p, ar.buffer) - - if copiedLength == originalLength { - ar.buffer = make([]byte, 0, len(p)) - } else { - ar.buffer = ar.buffer[copiedLength:] - } - - logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) - return copiedLength, nil - } - - // Read and translate key events - events, err := readInputEvents(ar.fd, len(p)) - if err != nil { - return 0, err - } else if len(events) == 0 { - logger.Debug("No input events detected") - return 0, nil - } - - keyBytes := translateKeyEvents(events, []byte(escapeSequence)) - - // Save excess bytes and right-size keyBytes - if len(keyBytes) > len(p) { - logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) - ar.buffer = keyBytes[len(p):] - keyBytes = keyBytes[:len(p)] - } else if len(keyBytes) == 0 { - logger.Debug("No key bytes returned from the translator") - return 0, nil - } - - copiedLength := copy(p, keyBytes) - if copiedLength != len(keyBytes) { - return 0, errors.New("unexpected copy length encountered") - } - - logger.Debugf("Read p[%d]: % x", copiedLength, p) - logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) - return copiedLength, nil -} - -// readInputEvents polls until at least one event is available. -func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { - // Determine the maximum number of records to retrieve - // -- Cast around the type system to obtain the size of a single INPUT_RECORD. - // unsafe.Sizeof requires an expression vs. a type-reference; the casting - // tricks the type system into believing it has such an expression. 
- recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) - countRecords := maxBytes / recordSize - if countRecords > ansiterm.MAX_INPUT_EVENTS { - countRecords = ansiterm.MAX_INPUT_EVENTS - } else if countRecords == 0 { - countRecords = 1 - } - logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) - - // Wait for and read input events - events := make([]winterm.INPUT_RECORD, countRecords) - nEvents := uint32(0) - eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) - if err != nil { - return nil, err - } - - if eventsExist { - err = winterm.ReadConsoleInput(fd, events, &nEvents) - if err != nil { - return nil, err - } - } - - // Return a slice restricted to the number of returned records - logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) - return events[:nEvents], nil -} - -// KeyEvent Translation Helpers - -var arrowKeyMapPrefix = map[uint16]string{ - winterm.VK_UP: "%s%sA", - winterm.VK_DOWN: "%s%sB", - winterm.VK_RIGHT: "%s%sC", - winterm.VK_LEFT: "%s%sD", -} - -var keyMapPrefix = map[uint16]string{ - winterm.VK_UP: "\x1B[%sA", - winterm.VK_DOWN: "\x1B[%sB", - winterm.VK_RIGHT: "\x1B[%sC", - winterm.VK_LEFT: "\x1B[%sD", - winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 - winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 - winterm.VK_INSERT: "\x1B[2%s~", - winterm.VK_DELETE: "\x1B[3%s~", - winterm.VK_PRIOR: "\x1B[5%s~", - winterm.VK_NEXT: "\x1B[6%s~", - winterm.VK_F1: "", - winterm.VK_F2: "", - winterm.VK_F3: "\x1B[13%s~", - winterm.VK_F4: "\x1B[14%s~", - winterm.VK_F5: "\x1B[15%s~", - winterm.VK_F6: "\x1B[17%s~", - winterm.VK_F7: "\x1B[18%s~", - winterm.VK_F8: "\x1B[19%s~", - winterm.VK_F9: "\x1B[20%s~", - winterm.VK_F10: "\x1B[21%s~", - winterm.VK_F11: "\x1B[23%s~", - winterm.VK_F12: "\x1B[24%s~", -} - -// translateKeyEvents converts the input events into the appropriate ANSI string. -func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { - var buffer bytes.Buffer - for _, event := range events { - if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { - buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) - } - } - - return buffer.Bytes() -} - -// keyToString maps the given input event record to the corresponding string. -func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { - if keyEvent.UnicodeChar == 0 { - return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) - } - - _, alt, control := getControlKeys(keyEvent.ControlKeyState) - if control { - // TODO(azlinux): Implement following control sequences - // <Ctrl>-D Signals the end of input from the keyboard; also exits current shell. - // <Ctrl>-H Deletes the first character to the left of the cursor. Also called the ERASE key. - // <Ctrl>-Q Restarts printing after it has been stopped with <Ctrl>-s. - // <Ctrl>-S Suspends printing on the screen (does not stop the program). - // <Ctrl>-U Deletes all characters on the current line. Also called the KILL key. - // <Ctrl>-E Quits current command and creates a core - - } - - // <Alt>+Key generates ESC N Key - if !control && alt { - return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) - } - - return string(keyEvent.UnicodeChar) -} - -// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
-func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { - shift, alt, control := getControlKeys(controlState) - modifier := getControlKeysModifier(shift, alt, control) - - if format, ok := arrowKeyMapPrefix[key]; ok { - return fmt.Sprintf(format, escapeSequence, modifier) - } - - if format, ok := keyMapPrefix[key]; ok { - return fmt.Sprintf(format, modifier) - } - - return "" -} - -// getControlKeys extracts the shift, alt, and ctrl key states. -func getControlKeys(controlState uint32) (shift, alt, control bool) { - shift = 0 != (controlState & winterm.SHIFT_PRESSED) - alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) - control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) - return shift, alt, control -} - -// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. -func getControlKeysModifier(shift, alt, control bool) string { - if shift && alt && control { - return ansiterm.KEY_CONTROL_PARAM_8 - } - if alt && control { - return ansiterm.KEY_CONTROL_PARAM_7 - } - if shift && control { - return ansiterm.KEY_CONTROL_PARAM_6 - } - if control { - return ansiterm.KEY_CONTROL_PARAM_5 - } - if shift && alt { - return ansiterm.KEY_CONTROL_PARAM_4 - } - if alt { - return ansiterm.KEY_CONTROL_PARAM_3 - } - if shift { - return ansiterm.KEY_CONTROL_PARAM_2 - } - return "" -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go deleted file mode 100644 index 7799a03f..00000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go +++ /dev/null @@ -1,64 +0,0 @@ -// +build windows - -package windowsconsole // import "github.com/docker/docker/pkg/term/windows" - -import ( - "io" - "os" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. -type ansiWriter struct { - file *os.File - fd uintptr - infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO - command []byte - escapeSequence []byte - inAnsiSequence bool - parser *ansiterm.AnsiParser -} - -// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a -// Windows console output handle. -func NewAnsiWriter(nFile int) io.Writer { - initLogger() - file, fd := winterm.GetStdFile(nFile) - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) - logger.Infof("newAnsiWriter: parser %p", parser) - - aw := &ansiWriter{ - file: file, - fd: fd, - infoReset: info, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - escapeSequence: []byte(ansiterm.KEY_ESC_CSI), - parser: parser, - } - - logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) - logger.Infof("newAnsiWriter: %v", aw) - return aw -} - -func (aw *ansiWriter) Fd() uintptr { - return aw.fd -} - -// Write writes len(p) bytes from p to the underlying data stream. 
-func (aw *ansiWriter) Write(p []byte) (total int, err error) { - if len(p) == 0 { - return 0, nil - } - - logger.Infof("Write: % x", p) - logger.Infof("Write: %s", string(p)) - return aw.parser.Parse(p) -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/console.go b/vendor/github.com/docker/docker/pkg/term/windows/console.go deleted file mode 100644 index 52740197..00000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/console.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build windows - -package windowsconsole // import "github.com/docker/docker/pkg/term/windows" - -import ( - "os" - - "github.com/Azure/go-ansiterm/winterm" -) - -// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. -func GetHandleInfo(in interface{}) (uintptr, bool) { - switch t := in.(type) { - case *ansiReader: - return t.Fd(), true - case *ansiWriter: - return t.Fd(), true - } - - var inFd uintptr - var isTerminal bool - - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminal = IsConsole(inFd) - } - return inFd, isTerminal -} - -// IsConsole returns true if the given file descriptor is a Windows Console. -// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. -func IsConsole(fd uintptr) bool { - _, e := winterm.GetConsoleMode(fd) - return e == nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go deleted file mode 100644 index 1f896596..00000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/windows.go +++ /dev/null @@ -1,33 +0,0 @@ -// These files implement ANSI-aware input and output streams for use by the Docker Windows client. -// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create -// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. - -package windowsconsole // import "github.com/docker/docker/pkg/term/windows" - -import ( - "io/ioutil" - "os" - "sync" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/sirupsen/logrus" -) - -var logger *logrus.Logger -var initOnce sync.Once - -func initLogger() { - initOnce.Do(func() { - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiReaderWriter.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, - } - }) -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go b/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go deleted file mode 100644 index 80cda601..00000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go +++ /dev/null @@ -1,3 +0,0 @@ -// This file is necessary to pass the Docker tests. - -package windowsconsole // import "github.com/docker/docker/pkg/term/windows" diff --git a/vendor/github.com/docker/docker/pkg/term/winsize.go b/vendor/github.com/docker/docker/pkg/term/winsize.go deleted file mode 100644 index a19663ad..00000000 --- a/vendor/github.com/docker/docker/pkg/term/winsize.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !windows - -package term // import "github.com/docker/docker/pkg/term" - -import ( - "golang.org/x/sys/unix" -) - -// GetWinsize returns the window size based on the specified file descriptor. 
-func GetWinsize(fd uintptr) (*Winsize, error) { - uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) - ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel} - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. -func SetWinsize(fd uintptr, ws *Winsize) error { - uws := &unix.Winsize{Row: ws.Height, Col: ws.Width, Xpixel: ws.x, Ypixel: ws.y} - return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, uws) -} diff --git a/vendor/github.com/docker/docker/poule.yml b/vendor/github.com/docker/docker/poule.yml deleted file mode 100644 index 7afb79ea..00000000 --- a/vendor/github.com/docker/docker/poule.yml +++ /dev/null @@ -1,128 +0,0 @@ -# Add a "status/0-triage" to every newly opened pull request. -- triggers: - pull_request: [ opened ] - operations: - - type: label - filters: { - ~labels: [ "status/0-triage", "status/1-design-review", "status/2-code-review", "status/3-docs-review", "status/4-merge" ], - } - settings: { - patterns: { - status/0-triage: [ ".*" ], - } - } - -# For every newly created or modified issue, assign label based on matching regexp using the `label` -# operation, as well as an Engine-specific version label using `version-label`. -- triggers: - issues: [ edited, opened, reopened ] - operations: - - type: label - settings: { - patterns: { - area/builder: [ "dockerfile", "docker build" ], - area/distribution: [ "docker login", "docker logout", "docker pull", "docker push", "docker search" ], - area/plugins: [ "docker plugin" ], - area/networking: [ "docker network", "ipvs", "vxlan" ], - area/runtime: [ "oci runtime error" ], - area/security/trust: [ "docker_content_trust" ], - area/swarm: [ "docker node", "docker swarm", "docker service create", "docker service inspect", "docker service logs", "docker service ls", "docker service ps", "docker service rm", "docker service scale", "docker service update" ], - platform/desktop: [ "docker for mac", "docker for windows" ], - platform/freebsd: [ "freebsd" ], - platform/windows: [ "nanoserver", "windowsservercore", "windows server" ], - platform/arm: [ "raspberry", "raspbian", "rpi", "beaglebone", "pine64" ], - } - } - - type: version-label - -# Labeling a PR with `rebuild/<configuration>` triggers a rebuild job for the associated -# configuration. The label is automatically removed after the rebuild is initiated. There's no such -# thing as "templating" in this configuration, so we need one operation for each type of -# configuration that can be triggered. -- triggers: - pull_request: [ labeled ] - operations: - - type: rebuild - settings: { - # When configurations are empty, the `rebuild` operation rebuilds all the currently - # known statuses for that pull request.
- configurations: [], - label: "rebuild/*", - } - - type: rebuild - settings: { - configurations: [ arm ], - label: "rebuild/arm", - } - - type: rebuild - settings: { - configurations: [ experimental ], - label: "rebuild/experimental", - } - - type: rebuild - settings: { - configurations: [ janky ], - label: "rebuild/janky", - } - - type: rebuild - settings: { - configurations: [ powerpc ], - label: "rebuild/powerpc", - } - - type: rebuild - settings: { - configurations: [ userns ], - label: "rebuild/userns", - } - - type: rebuild - settings: { - configurations: [ vendor ], - label: "rebuild/vendor", - } - - type: rebuild - settings: { - configurations: [ win2lin ], - label: "rebuild/win2lin", - } - - type: rebuild - settings: { - configurations: [ windowsRS1 ], - label: "rebuild/windowsRS1", - } - - type: rebuild - settings: { - configurations: [ z ], - label: "rebuild/z", - } - -# Once a day, randomly assign pull requests older than 2 weeks. -- schedule: "@daily" - operations: - - type: random-assign - filters: { - age: "2w", - is: "pr", - } - settings: { - users: [ - "aaronlehmann", - "akihirosuda", - "coolljt0725", - "cpuguy83", - "crosbymichael", - "dnephin", - "duglin", - "johnstep", - "justincormack", - "mhbauer", - "mlaventure", - "runcom", - "stevvooe", - "thajeztah", - "tiborvass", - "tonistiigi", - "vdemeester", - "vieux", - "yongtang", - ] - } diff --git a/vendor/github.com/docker/docker/project/CONTRIBUTING.md b/vendor/github.com/docker/docker/project/CONTRIBUTING.md new file mode 120000 index 00000000..44fcc634 --- /dev/null +++ b/vendor/github.com/docker/docker/project/CONTRIBUTING.md @@ -0,0 +1 @@ +../CONTRIBUTING.md \ No newline at end of file diff --git a/vendor/github.com/docker/docker/vendor.conf b/vendor/github.com/docker/docker/vendor.conf deleted file mode 100644 index 74a94e01..00000000 --- a/vendor/github.com/docker/docker/vendor.conf +++ /dev/null @@ -1,152 +0,0 @@ -# the following lines are in sorted order, FYI -github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109 -github.com/Microsoft/hcsshim v0.6.8 -github.com/Microsoft/go-winio v0.4.6 -github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a -github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git -github.com/golang/gddo 9b12a26f3fbd7397dee4e20939ddca719d840d2a -github.com/gorilla/context v1.1 -github.com/gorilla/mux v1.1 -github.com/Microsoft/opengcs v0.3.6 -github.com/kr/pty 5cf931ef8f -github.com/mattn/go-shellwords v1.0.3 -github.com/sirupsen/logrus v1.0.3 -github.com/tchap/go-patricia v2.2.6 -github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 -golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 -golang.org/x/sys 37707fdb30a5b38865cfb95e5aab41707daec7fd -github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1 -github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6 -golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756 -github.com/pmezard/go-difflib v1.0.0 -github.com/gotestyourself/gotestyourself cf3a5ab914a2efa8bc838d09f5918c1d44d029 -github.com/google/go-cmp v0.2.0 - -github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5 -github.com/imdario/mergo 0.2.1 -golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5 - -github.com/moby/buildkit aaff9d591ef128560018433fe61beb802e149de8 -github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2 - -#get libnetwork packages - -# When updating, also update LIBNETWORK_COMMIT in 
hack/dockerfile/install/proxy accordingly -github.com/docker/libnetwork 5c1218c956c99f3365711974e300087810c31379 -github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 -github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80 -github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec -github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b -github.com/hashicorp/memberlist 3d8438da9589e7b608a83ffac1ef8211486bcb7c -github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372 -github.com/hashicorp/go-sockaddr acd314c5781ea706c710d9ea70069fd2e110d61d -github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e -github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870 -github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef -github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25 -github.com/vishvananda/netlink b2de5d10e38ecce8607e6b438b6d174f389a004e - -# When updating, consider updating TOMLV_COMMIT in hack/dockerfile/install/tomlv accordingly -github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895 -github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374 -github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d -github.com/coreos/etcd v3.2.1 -github.com/coreos/go-semver v0.2.0 -github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065 -github.com/hashicorp/consul v0.5.2 -github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904 -github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7 -github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb - -# get graph and distribution packages -github.com/docker/distribution edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c -github.com/vbatts/tar-split v0.10.2 -github.com/opencontainers/go-digest v1.0.0-rc1 - -# get go-zfs packages -github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa -github.com/pborman/uuid v1.0 - -google.golang.org/grpc v1.3.0 - -# When updating, also update RUNC_COMMIT in hack/dockerfile/install/runc accordingly -github.com/opencontainers/runc 4fc53a81fb7c994640722ac585fa9ca548971871 -github.com/opencontainers/runtime-spec v1.0.1 -github.com/opencontainers/image-spec v1.0.1 -github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 - -# libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json) -github.com/coreos/go-systemd v15 -github.com/godbus/dbus v4.0.0 -github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852 -github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4 - -# gelf logging driver deps -github.com/Graylog2/go-gelf 4143646226541087117ff2f83334ea48b3201841 - -github.com/fluent/fluent-logger-golang v1.3.0 -# fluent-logger-golang deps -github.com/philhofer/fwd 98c11a7a6ec829d672b03833c3d69a7fae1ca972 -github.com/tinylib/msgp 3b556c64540842d4f82967be066a7f7fffc3adad - -# fsnotify -github.com/fsnotify/fsnotify 4da3e2cfbabc9f751898f250b49f2439785783a1 - -# awslogs deps -github.com/aws/aws-sdk-go v1.12.66 -github.com/go-ini/ini v1.25.4 -github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 - -# logentries -github.com/bsphere/le_go 7a984a84b5492ae539b79b62fb4a10afc63c7bcf - -# gcplogs deps -golang.org/x/oauth2 96382aa079b72d8c014eb0c50f6c223d1e6a2de0 -google.golang.org/api 3cc2e591b550923a2c5f0ab5a803feda924d5823 -cloud.google.com/go 9d965e63e8cceb1b5d7977a202f0fcb8866d6525 -github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7 
-google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 - -# containerd -github.com/containerd/containerd 3fa104f843ec92328912e042b767d26825f202aa -github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6 -github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371 -github.com/containerd/cgroups fe281dd265766145e943a034aa41086474ea6130 -github.com/containerd/console 2748ece16665b45a47f884001d5831ec79703880 -github.com/containerd/go-runc 4f6e87ae043f859a38255247b49c9abc262d002f -github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788 -github.com/dmcgowan/go-tar go1.10 -github.com/stevvooe/ttrpc d4528379866b0ce7e9d71f3eb96f0582fc374577 - -# cluster -github.com/docker/swarmkit 831df679a0b8a21b4dccd5791667d030642de7ff -github.com/gogo/protobuf v0.4 -github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a -github.com/fernet/fernet-go 1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2 -github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e -golang.org/x/crypto 558b6879de74bc843225cde5686419267ff707ca -golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb -github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad -github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990 -github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 -github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8 -github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0 -github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e -github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 -github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 -github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8 -github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 -github.com/matttproud/golang_protobuf_extensions v1.0.0 -github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9 -github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 - -# cli -github.com/spf13/cobra v1.5.1 https://github.com/dnephin/cobra.git -github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7 -github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty - -# metrics -github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18 - -github.com/opencontainers/selinux b29023b86e4a69d1b46b7e7b4e2b6fda03f0b9cd diff --git a/vendor/github.com/docker/go-connections/BUILD.bazel b/vendor/github.com/docker/go-connections/BUILD.bazel deleted file mode 100644 index 797982b8..00000000 --- a/vendor/github.com/docker/go-connections/BUILD.bazel +++ /dev/null @@ -1,8 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["doc.go"], - importpath = "github.com/docker/go-connections", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/docker/go-connections/CONTRIBUTING.md b/vendor/github.com/docker/go-connections/CONTRIBUTING.md deleted file mode 100644 index 926dcc93..00000000 --- a/vendor/github.com/docker/go-connections/CONTRIBUTING.md +++ /dev/null @@ -1,55 +0,0 @@ -# Contributing to Docker - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. 
Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith <joe.smith@email.com> - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-connections/MAINTAINERS b/vendor/github.com/docker/go-connections/MAINTAINERS deleted file mode 100644 index 477be8b2..00000000 --- a/vendor/github.com/docker/go-connections/MAINTAINERS +++ /dev/null @@ -1,27 +0,0 @@ -# go-connections maintainers file -# -# This file describes who runs the docker/go-connections project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "calavera", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. - - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - [people.calavera] - Name = "David Calavera" - Email = "david.calavera@gmail.com" - GitHub = "calavera" diff --git a/vendor/github.com/docker/go-connections/README.md b/vendor/github.com/docker/go-connections/README.md deleted file mode 100644 index d257e44f..00000000 --- a/vendor/github.com/docker/go-connections/README.md +++ /dev/null @@ -1,13 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/docker/go-connections?status.svg)](https://godoc.org/github.com/docker/go-connections) - -# Introduction - -go-connections provides common package to work with network connections.
- -## Usage - -See the [docs in godoc](https://godoc.org/github.com/docker/go-connections) for examples and documentation. - -## License - -go-connections is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text. diff --git a/vendor/github.com/docker/go-connections/circle.yml b/vendor/github.com/docker/go-connections/circle.yml deleted file mode 100644 index 8a82ee82..00000000 --- a/vendor/github.com/docker/go-connections/circle.yml +++ /dev/null @@ -1,14 +0,0 @@ -dependencies: - pre: - # setup ipv6 - - sudo sysctl -w net.ipv6.conf.lo.disable_ipv6=0 net.ipv6.conf.default.disable_ipv6=0 net.ipv6.conf.all.disable_ipv6=0 - post: - # install golint - - go get github.com/golang/lint/golint - -test: - pre: - # run analysis before tests - - go vet ./... - - test -z "$(golint ./... | tee /dev/stderr)" - - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/docker/go-connections/doc.go b/vendor/github.com/docker/go-connections/doc.go deleted file mode 100644 index 43e27247..00000000 --- a/vendor/github.com/docker/go-connections/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package connections provides libraries to work with network connections. -// This library is divided in several components for specific usage. -package connections diff --git a/vendor/github.com/docker/go-connections/nat/BUILD.bazel b/vendor/github.com/docker/go-connections/nat/BUILD.bazel deleted file mode 100644 index 35abba42..00000000 --- a/vendor/github.com/docker/go-connections/nat/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "nat.go", - "parse.go", - "sort.go", - ], - importpath = "github.com/docker/go-connections/nat", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = [ - "nat_test.go", - "parse_test.go", - "sort_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/docker/go-connections/nat", - deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"], -) diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go index 4d5f5ae6..bb7e4e33 100644 --- a/vendor/github.com/docker/go-connections/nat/nat.go +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -113,7 +113,7 @@ func SplitProtoPort(rawPort string) (string, string) { } func validateProto(proto string) bool { - for _, availableProto := range []string{"tcp", "udp"} { + for _, availableProto := range []string{"tcp", "udp", "sctp"} { if availableProto == proto { return true } diff --git a/vendor/github.com/docker/go-connections/nat/nat_test.go b/vendor/github.com/docker/go-connections/nat/nat_test.go deleted file mode 100644 index 787d5ac2..00000000 --- a/vendor/github.com/docker/go-connections/nat/nat_test.go +++ /dev/null @@ -1,583 +0,0 @@ -package nat - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestParsePort(t *testing.T) { - var ( - p int - err error - ) - - p, err = ParsePort("1234") - - if err != nil || p != 1234 { - t.Fatal("Parsing '1234' did not succeed") - } - - // FIXME currently this is a valid port. I don't think it should be. - // I'm leaving this test commented out until we make a decision. 
- // - erikh - - /* - p, err = ParsePort("0123") - - if err != nil { - t.Fatal("Successfully parsed port '0123' to '123'") - } - */ - - p, err = ParsePort("asdf") - - if err == nil || p != 0 { - t.Fatal("Parsing port 'asdf' succeeded") - } - - p, err = ParsePort("1asdf") - - if err == nil || p != 0 { - t.Fatal("Parsing port '1asdf' succeeded") - } -} - -func TestParsePortRangeToInt(t *testing.T) { - var ( - begin int - end int - err error - ) - - type TestRange struct { - Range string - Begin int - End int - } - validRanges := []TestRange{ - {"1234", 1234, 1234}, - {"1234-1234", 1234, 1234}, - {"1234-1235", 1234, 1235}, - {"8000-9000", 8000, 9000}, - {"0", 0, 0}, - {"0-0", 0, 0}, - } - - for _, r := range validRanges { - begin, end, err = ParsePortRangeToInt(r.Range) - - if err != nil || begin != r.Begin { - t.Fatalf("Parsing port range '%s' did not succeed. Expected begin %d, got %d", r.Range, r.Begin, begin) - } - if err != nil || end != r.End { - t.Fatalf("Parsing port range '%s' did not succeed. Expected end %d, got %d", r.Range, r.End, end) - } - } - - invalidRanges := []string{ - "asdf", - "1asdf", - "9000-8000", - "9000-", - "-8000", - "-8000-", - } - - for _, r := range invalidRanges { - begin, end, err = ParsePortRangeToInt(r) - - if err == nil || begin != 0 || end != 0 { - t.Fatalf("Parsing port range '%s' succeeded", r) - } - } -} - -func TestPort(t *testing.T) { - p, err := NewPort("tcp", "1234") - - if err != nil { - t.Fatalf("tcp, 1234 had a parsing issue: %v", err) - } - - if string(p) != "1234/tcp" { - t.Fatal("tcp, 1234 did not result in the string 1234/tcp") - } - - if p.Proto() != "tcp" { - t.Fatal("protocol was not tcp") - } - - if p.Port() != "1234" { - t.Fatal("port string value was not 1234") - } - - if p.Int() != 1234 { - t.Fatal("port int value was not 1234") - } - - p, err = NewPort("tcp", "asd1234") - if err == nil { - t.Fatal("tcp, asd1234 was supposed to fail") - } - - p, err = NewPort("tcp", "1234-1230") - if err == nil { - t.Fatal("tcp, 1234-1230 was supposed to fail") - } - - p, err = NewPort("tcp", "1234-1242") - if err != nil { - t.Fatalf("tcp, 1234-1242 had a parsing issue: %v", err) - } - - if string(p) != "1234-1242/tcp" { - t.Fatal("tcp, 1234-1242 did not result in the string 1234-1242/tcp") - } -} - -func TestSplitProtoPort(t *testing.T) { - var ( - proto string - port string - ) - - proto, port = SplitProtoPort("1234/tcp") - - if proto != "tcp" || port != "1234" { - t.Fatal("Could not split 1234/tcp properly") - } - - proto, port = SplitProtoPort("") - - if proto != "" || port != "" { - t.Fatal("parsing an empty string yielded surprising results", proto, port) - } - - proto, port = SplitProtoPort("1234") - - if proto != "tcp" || port != "1234" { - t.Fatal("tcp is not the default protocol for portspec '1234'", proto, port) - } - - proto, port = SplitProtoPort("1234/") - - if proto != "tcp" || port != "1234" { - t.Fatal("parsing '1234/' yielded:" + port + "/" + proto) - } - - proto, port = SplitProtoPort("/tcp") - - if proto != "" || port != "" { - t.Fatal("parsing '/tcp' yielded:" + port + "/" + proto) - } -} - -func TestParsePortSpecFull(t *testing.T) { - portMappings, err := ParsePortSpec("0.0.0.0:1234-1235:3333-3334/tcp") - assert.Nil(t, err) - - expected := []PortMapping{ - { - Port: "3333/tcp", - Binding: PortBinding{ - HostIP: "0.0.0.0", - HostPort: "1234", - }, - }, - { - Port: "3334/tcp", - Binding: PortBinding{ - HostIP: "0.0.0.0", - HostPort: "1235", - }, - }, - } - - assert.Equal(t, expected, portMappings) -} - -func TestPartPortSpecIPV6(t 
*testing.T) { - portMappings, err := ParsePortSpec("[2001:4860:0:2001::68]::333") - assert.Nil(t, err) - - expected := []PortMapping{ - { - Port: "333/tcp", - Binding: PortBinding{ - HostIP: "2001:4860:0:2001::68", - HostPort: "", - }, - }, - } - assert.Equal(t, expected, portMappings) -} - -func TestPartPortSpecIPV6WithHostPort(t *testing.T) { - portMappings, err := ParsePortSpec("[::1]:80:80") - assert.Nil(t, err) - - expected := []PortMapping{ - { - Port: "80/tcp", - Binding: PortBinding{ - HostIP: "::1", - HostPort: "80", - }, - }, - } - assert.Equal(t, expected, portMappings) -} - -func TestParsePortSpecs(t *testing.T) { - var ( - portMap map[Port]struct{} - bindingMap map[Port][]PortBinding - err error - ) - - portMap, bindingMap, err = ParsePortSpecs([]string{"1234/tcp", "2345/udp"}) - - if err != nil { - t.Fatalf("Error while processing ParsePortSpecs: %s", err) - } - - if _, ok := portMap[Port("1234/tcp")]; !ok { - t.Fatal("1234/tcp was not parsed properly") - } - - if _, ok := portMap[Port("2345/udp")]; !ok { - t.Fatal("2345/udp was not parsed properly") - } - - for portspec, bindings := range bindingMap { - if len(bindings) != 1 { - t.Fatalf("%s should have exactly one binding", portspec) - } - - if bindings[0].HostIP != "" { - t.Fatalf("HostIP should not be set for %s", portspec) - } - - if bindings[0].HostPort != "" { - t.Fatalf("HostPort should not be set for %s", portspec) - } - } - - portMap, bindingMap, err = ParsePortSpecs([]string{"1234:1234/tcp", "2345:2345/udp"}) - - if err != nil { - t.Fatalf("Error while processing ParsePortSpecs: %s", err) - } - - if _, ok := portMap[Port("1234/tcp")]; !ok { - t.Fatal("1234/tcp was not parsed properly") - } - - if _, ok := portMap[Port("2345/udp")]; !ok { - t.Fatal("2345/udp was not parsed properly") - } - - for portspec, bindings := range bindingMap { - _, port := SplitProtoPort(string(portspec)) - - if len(bindings) != 1 { - t.Fatalf("%s should have exactly one binding", portspec) - } - - if bindings[0].HostIP != "" { - t.Fatalf("HostIP should not be set for %s", portspec) - } - - if bindings[0].HostPort != port { - t.Fatalf("HostPort should be %s for %s", port, portspec) - } - } - - portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234:1234/tcp", "0.0.0.0:2345:2345/udp"}) - - if err != nil { - t.Fatalf("Error while processing ParsePortSpecs: %s", err) - } - - if _, ok := portMap[Port("1234/tcp")]; !ok { - t.Fatal("1234/tcp was not parsed properly") - } - - if _, ok := portMap[Port("2345/udp")]; !ok { - t.Fatal("2345/udp was not parsed properly") - } - - for portspec, bindings := range bindingMap { - _, port := SplitProtoPort(string(portspec)) - - if len(bindings) != 1 { - t.Fatalf("%s should have exactly one binding", portspec) - } - - if bindings[0].HostIP != "0.0.0.0" { - t.Fatalf("HostIP is not 0.0.0.0 for %s", portspec) - } - - if bindings[0].HostPort != port { - t.Fatalf("HostPort should be %s for %s", port, portspec) - } - } - - _, _, err = ParsePortSpecs([]string{"localhost:1234:1234/tcp"}) - - if err == nil { - t.Fatal("Received no error while trying to parse a hostname instead of ip") - } -} - -func TestParsePortSpecsWithRange(t *testing.T) { - var ( - portMap map[Port]struct{} - bindingMap map[Port][]PortBinding - err error - ) - - portMap, bindingMap, err = ParsePortSpecs([]string{"1234-1236/tcp", "2345-2347/udp"}) - - if err != nil { - t.Fatalf("Error while processing ParsePortSpecs: %s", err) - } - - if _, ok := portMap[Port("1235/tcp")]; !ok { - t.Fatal("1234/tcp was not parsed properly") - } - - if _, ok 
:= portMap[Port("2346/udp")]; !ok { - t.Fatal("2345/udp was not parsed properly") - } - - for portspec, bindings := range bindingMap { - if len(bindings) != 1 { - t.Fatalf("%s should have exactly one binding", portspec) - } - - if bindings[0].HostIP != "" { - t.Fatalf("HostIP should not be set for %s", portspec) - } - - if bindings[0].HostPort != "" { - t.Fatalf("HostPort should not be set for %s", portspec) - } - } - - portMap, bindingMap, err = ParsePortSpecs([]string{"1234-1236:1234-1236/tcp", "2345-2347:2345-2347/udp"}) - - if err != nil { - t.Fatalf("Error while processing ParsePortSpecs: %s", err) - } - - if _, ok := portMap[Port("1235/tcp")]; !ok { - t.Fatal("1234/tcp was not parsed properly") - } - - if _, ok := portMap[Port("2346/udp")]; !ok { - t.Fatal("2345/udp was not parsed properly") - } - - for portspec, bindings := range bindingMap { - _, port := SplitProtoPort(string(portspec)) - if len(bindings) != 1 { - t.Fatalf("%s should have exactly one binding", portspec) - } - - if bindings[0].HostIP != "" { - t.Fatalf("HostIP should not be set for %s", portspec) - } - - if bindings[0].HostPort != port { - t.Fatalf("HostPort should be %s for %s", port, portspec) - } - } - - portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234-1236:1234-1236/tcp", "0.0.0.0:2345-2347:2345-2347/udp"}) - - if err != nil { - t.Fatalf("Error while processing ParsePortSpecs: %s", err) - } - - if _, ok := portMap[Port("1235/tcp")]; !ok { - t.Fatal("1234/tcp was not parsed properly") - } - - if _, ok := portMap[Port("2346/udp")]; !ok { - t.Fatal("2345/udp was not parsed properly") - } - - for portspec, bindings := range bindingMap { - _, port := SplitProtoPort(string(portspec)) - if len(bindings) != 1 || bindings[0].HostIP != "0.0.0.0" || bindings[0].HostPort != port { - t.Fatalf("Expect single binding to port %s but found %s", port, bindings) - } - } - - _, _, err = ParsePortSpecs([]string{"localhost:1234-1236:1234-1236/tcp"}) - - if err == nil { - t.Fatal("Received no error while trying to parse a hostname instead of ip") - } -} - -func TestParseNetworkOptsPrivateOnly(t *testing.T) { - ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100::80"}) - if err != nil { - t.Fatal(err) - } - if len(ports) != 1 { - t.Logf("Expected 1 got %d", len(ports)) - t.FailNow() - } - if len(bindings) != 1 { - t.Logf("Expected 1 got %d", len(bindings)) - t.FailNow() - } - for k := range ports { - if k.Proto() != "tcp" { - t.Logf("Expected tcp got %s", k.Proto()) - t.Fail() - } - if k.Port() != "80" { - t.Logf("Expected 80 got %s", k.Port()) - t.Fail() - } - b, exists := bindings[k] - if !exists { - t.Log("Binding does not exist") - t.FailNow() - } - if len(b) != 1 { - t.Logf("Expected 1 got %d", len(b)) - t.FailNow() - } - s := b[0] - if s.HostPort != "" { - t.Logf("Expected \"\" got %s", s.HostPort) - t.Fail() - } - if s.HostIP != "192.168.1.100" { - t.Fail() - } - } -} - -func TestParseNetworkOptsPublic(t *testing.T) { - ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100:8080:80"}) - if err != nil { - t.Fatal(err) - } - if len(ports) != 1 { - t.Logf("Expected 1 got %d", len(ports)) - t.FailNow() - } - if len(bindings) != 1 { - t.Logf("Expected 1 got %d", len(bindings)) - t.FailNow() - } - for k := range ports { - if k.Proto() != "tcp" { - t.Logf("Expected tcp got %s", k.Proto()) - t.Fail() - } - if k.Port() != "80" { - t.Logf("Expected 80 got %s", k.Port()) - t.Fail() - } - b, exists := bindings[k] - if !exists { - t.Log("Binding does not exist") - t.FailNow() - } - if len(b) != 1 { - 
t.Logf("Expected 1 got %d", len(b)) - t.FailNow() - } - s := b[0] - if s.HostPort != "8080" { - t.Logf("Expected 8080 got %s", s.HostPort) - t.Fail() - } - if s.HostIP != "192.168.1.100" { - t.Fail() - } - } -} - -func TestParseNetworkOptsPublicNoPort(t *testing.T) { - ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100"}) - - if err == nil { - t.Logf("Expected error Invalid containerPort") - t.Fail() - } - if ports != nil { - t.Logf("Expected nil got %s", ports) - t.Fail() - } - if bindings != nil { - t.Logf("Expected nil got %s", bindings) - t.Fail() - } -} - -func TestParseNetworkOptsNegativePorts(t *testing.T) { - ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100:-1:-1"}) - - if err == nil { - t.Fail() - } - if len(ports) != 0 { - t.Logf("Expected nil got %d", len(ports)) - t.Fail() - } - if len(bindings) != 0 { - t.Logf("Expected 0 got %d", len(bindings)) - t.Fail() - } -} - -func TestParseNetworkOptsUdp(t *testing.T) { - ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100::6000/udp"}) - if err != nil { - t.Fatal(err) - } - if len(ports) != 1 { - t.Logf("Expected 1 got %d", len(ports)) - t.FailNow() - } - if len(bindings) != 1 { - t.Logf("Expected 1 got %d", len(bindings)) - t.FailNow() - } - for k := range ports { - if k.Proto() != "udp" { - t.Logf("Expected udp got %s", k.Proto()) - t.Fail() - } - if k.Port() != "6000" { - t.Logf("Expected 6000 got %s", k.Port()) - t.Fail() - } - b, exists := bindings[k] - if !exists { - t.Log("Binding does not exist") - t.FailNow() - } - if len(b) != 1 { - t.Logf("Expected 1 got %d", len(b)) - t.FailNow() - } - s := b[0] - if s.HostPort != "" { - t.Logf("Expected \"\" got %s", s.HostPort) - t.Fail() - } - if s.HostIP != "192.168.1.100" { - t.Fail() - } - } -} diff --git a/vendor/github.com/docker/go-connections/nat/parse_test.go b/vendor/github.com/docker/go-connections/nat/parse_test.go deleted file mode 100644 index 2ac204a0..00000000 --- a/vendor/github.com/docker/go-connections/nat/parse_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package nat - -import ( - "strings" - "testing" -) - -func TestParsePortRange(t *testing.T) { - if start, end, err := ParsePortRange("8000-8080"); err != nil || start != 8000 || end != 8080 { - t.Fatalf("Error: %s or Expecting {start,end} values {8000,8080} but found {%d,%d}.", err, start, end) - } -} - -func TestParsePortRangeEmpty(t *testing.T) { - if _, _, err := ParsePortRange(""); err == nil || err.Error() != "Empty string specified for ports." 
{ - t.Fatalf("Expected error 'Empty string specified for ports.', got %v", err) - } -} - -func TestParsePortRangeWithNoRange(t *testing.T) { - start, end, err := ParsePortRange("8080") - if err != nil { - t.Fatal(err) - } - if start != 8080 || end != 8080 { - t.Fatalf("Expected start and end to be the same and equal to 8080, but were %v and %v", start, end) - } -} - -func TestParsePortRangeIncorrectRange(t *testing.T) { - if _, _, err := ParsePortRange("9000-8080"); err == nil || !strings.Contains(err.Error(), "Invalid range specified for the Port") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } -} - -func TestParsePortRangeIncorrectEndRange(t *testing.T) { - if _, _, err := ParsePortRange("8000-a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } - - if _, _, err := ParsePortRange("8000-30a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } -} - -func TestParsePortRangeIncorrectStartRange(t *testing.T) { - if _, _, err := ParsePortRange("a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } - - if _, _, err := ParsePortRange("30a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } -} diff --git a/vendor/github.com/docker/go-connections/nat/sort_test.go b/vendor/github.com/docker/go-connections/nat/sort_test.go deleted file mode 100644 index 88ed9111..00000000 --- a/vendor/github.com/docker/go-connections/nat/sort_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package nat - -import ( - "fmt" - "reflect" - "testing" -) - -func TestSortUniquePorts(t *testing.T) { - ports := []Port{ - Port("6379/tcp"), - Port("22/tcp"), - } - - Sort(ports, func(ip, jp Port) bool { - return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") - }) - - first := ports[0] - if fmt.Sprint(first) != "22/tcp" { - t.Log(fmt.Sprint(first)) - t.Fail() - } -} - -func TestSortSamePortWithDifferentProto(t *testing.T) { - ports := []Port{ - Port("8888/tcp"), - Port("8888/udp"), - Port("6379/tcp"), - Port("6379/udp"), - } - - Sort(ports, func(ip, jp Port) bool { - return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") - }) - - first := ports[0] - if fmt.Sprint(first) != "6379/tcp" { - t.Fail() - } -} - -func TestSortPortMap(t *testing.T) { - ports := []Port{ - Port("22/tcp"), - Port("22/udp"), - Port("8000/tcp"), - Port("6379/tcp"), - Port("9999/tcp"), - } - - portMap := PortMap{ - Port("22/tcp"): []PortBinding{ - {}, - }, - Port("8000/tcp"): []PortBinding{ - {}, - }, - Port("6379/tcp"): []PortBinding{ - {}, - {HostIP: "0.0.0.0", HostPort: "32749"}, - }, - Port("9999/tcp"): []PortBinding{ - {HostIP: "0.0.0.0", HostPort: "40000"}, - }, - } - - SortPortMap(ports, portMap) - if !reflect.DeepEqual(ports, []Port{ - Port("9999/tcp"), - Port("6379/tcp"), - Port("8000/tcp"), - Port("22/tcp"), - Port("22/udp"), - }) { - t.Errorf("failed to prioritize port with explicit mappings, got %v", ports) - } - if pm := portMap[Port("6379/tcp")]; !reflect.DeepEqual(pm, []PortBinding{ - {HostIP: "0.0.0.0", HostPort: "32749"}, - {}, - }) { - t.Errorf("failed to prioritize bindings with explicit mappings, got %v", pm) - 
} -} diff --git a/vendor/github.com/docker/go-connections/sockets/BUILD.bazel b/vendor/github.com/docker/go-connections/sockets/BUILD.bazel deleted file mode 100644 index 6badf1eb..00000000 --- a/vendor/github.com/docker/go-connections/sockets/BUILD.bazel +++ /dev/null @@ -1,73 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "inmem_socket.go", - "proxy.go", - "sockets.go", - "tcp_socket.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "sockets_unix.go", - "unix_socket.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "sockets_unix.go", - "unix_socket.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "sockets_unix.go", - "unix_socket.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "sockets_unix.go", - "unix_socket.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "sockets_unix.go", - "unix_socket.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "sockets_unix.go", - "unix_socket.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "sockets_unix.go", - "unix_socket.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "sockets_unix.go", - "unix_socket.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "sockets_unix.go", - "unix_socket.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "sockets_unix.go", - "unix_socket.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "sockets_windows.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/docker/go-connections/sockets", - visibility = ["//visibility:public"], - deps = [ - "//vendor/golang.org/x/net/proxy:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/Microsoft/go-winio:go_default_library", - ], - "//conditions:default": [], - }), -) - -go_test( - name = "go_default_test", - srcs = ["inmem_socket_test.go"], - embed = [":go_default_library"], - importpath = "github.com/docker/go-connections/sockets", -) diff --git a/vendor/github.com/docker/go-connections/sockets/README.md b/vendor/github.com/docker/go-connections/sockets/README.md deleted file mode 100644 index e69de29b..00000000 diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket_test.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket_test.go deleted file mode 100644 index 24dc1d10..00000000 --- a/vendor/github.com/docker/go-connections/sockets/inmem_socket_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package sockets - -import "testing" - -func TestInmemSocket(t *testing.T) { - l := NewInmemSocket("test", 0) - defer l.Close() - go func() { - for { - conn, err := l.Accept() - if err != nil { - return - } - conn.Write([]byte("hello")) - conn.Close() - } - }() - - conn, err := l.Dial("test", "test") - if err != nil { - t.Fatal(err) - } - - buf := make([]byte, 5) - _, err = conn.Read(buf) - if err != nil { - t.Fatal(err) - } - - if string(buf) != "hello" { - t.Fatalf("expected `hello`, got %s", string(buf)) - } - - l.Close() - conn, err = l.Dial("test", "test") - if err != errClosed { - t.Fatalf("expected `errClosed` error, got %v", err) - } -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/BUILD.bazel b/vendor/github.com/docker/go-connections/tlsconfig/BUILD.bazel deleted file mode 100644 index 2ce651c0..00000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name 
= "go_default_library", - srcs = [ - "certpool_go17.go", - "certpool_other.go", - "config.go", - "config_client_ciphers.go", - "config_legacy_client_ciphers.go", - ], - importpath = "github.com/docker/go-connections/tlsconfig", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/pkg/errors:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = ["config_test.go"], - embed = [":go_default_library"], - importpath = "github.com/docker/go-connections/tlsconfig", -) diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go index 9ca97453..1ff81c33 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go @@ -4,7 +4,6 @@ package tlsconfig import ( "crypto/x509" - ) // SystemCertPool returns an new empty cert pool, diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go index 1b31bbb8..0ef3fdcb 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -46,8 +46,6 @@ var acceptedCBCCiphers = []uint16{ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, } // DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls @@ -65,22 +63,34 @@ var allTLSVersions = map[uint16]struct{}{ } // ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. -func ServerDefault() *tls.Config { - return &tls.Config{ - // Avoid fallback to SSL protocols < TLS1.0 - MinVersion: tls.VersionTLS10, +func ServerDefault(ops ...func(*tls.Config)) *tls.Config { + tlsconfig := &tls.Config{ + // Avoid fallback by default to SSL protocols < TLS1.2 + MinVersion: tls.VersionTLS12, PreferServerCipherSuites: true, CipherSuites: DefaultServerAcceptedCiphers, } + + for _, op := range ops { + op(tlsconfig) + } + + return tlsconfig } // ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. -func ClientDefault() *tls.Config { - return &tls.Config{ +func ClientDefault(ops ...func(*tls.Config)) *tls.Config { + tlsconfig := &tls.Config{ // Prefer TLS1.2 as the client minimum MinVersion: tls.VersionTLS12, CipherSuites: clientCipherSuites, } + + for _, op := range ops { + op(tlsconfig) + } + + return tlsconfig } // certPool returns an X.509 certificate pool from `caFile`, the certificate file. diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_test.go b/vendor/github.com/docker/go-connections/tlsconfig/config_test.go deleted file mode 100644 index 02131d6b..00000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/config_test.go +++ /dev/null @@ -1,651 +0,0 @@ -package tlsconfig - -import ( - "bytes" - "crypto/tls" - "crypto/x509" - "encoding/pem" - "io/ioutil" - "os" - "reflect" - "testing" -) - -// This is the currently active LetsEncrypt IdenTrust cross-signed CA cert. It expires Mar 17, 2021. 
-const ( - systemRootTrustedCert = ` ------BEGIN CERTIFICATE----- -MIIEkjCCA3qgAwIBAgIQCgFBQgAAAVOFc2oLheynCDANBgkqhkiG9w0BAQsFADA/ -MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT -DkRTVCBSb290IENBIFgzMB4XDTE2MDMxNzE2NDA0NloXDTIxMDMxNzE2NDA0Nlow -SjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUxldCdzIEVuY3J5cHQxIzAhBgNVBAMT -GkxldCdzIEVuY3J5cHQgQXV0aG9yaXR5IFgzMIIBIjANBgkqhkiG9w0BAQEFAAOC -AQ8AMIIBCgKCAQEAnNMM8FrlLke3cl03g7NoYzDq1zUmGSXhvb418XCSL7e4S0EF -q6meNQhY7LEqxGiHC6PjdeTm86dicbp5gWAf15Gan/PQeGdxyGkOlZHP/uaZ6WA8 -SMx+yk13EiSdRxta67nsHjcAHJyse6cF6s5K671B5TaYucv9bTyWaN8jKkKQDIZ0 -Z8h/pZq4UmEUEz9l6YKHy9v6Dlb2honzhT+Xhq+w3Brvaw2VFn3EK6BlspkENnWA -a6xK8xuQSXgvopZPKiAlKQTGdMDQMc2PMTiVFrqoM7hD8bEfwzB/onkxEz0tNvjj -/PIzark5McWvxI0NHWQWM6r6hCm21AvA2H3DkwIDAQABo4IBfTCCAXkwEgYDVR0T -AQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwfwYIKwYBBQUHAQEEczBxMDIG -CCsGAQUFBzABhiZodHRwOi8vaXNyZy50cnVzdGlkLm9jc3AuaWRlbnRydXN0LmNv -bTA7BggrBgEFBQcwAoYvaHR0cDovL2FwcHMuaWRlbnRydXN0LmNvbS9yb290cy9k -c3Ryb290Y2F4My5wN2MwHwYDVR0jBBgwFoAUxKexpHsscfrb4UuQdf/EFWCFiRAw -VAYDVR0gBE0wSzAIBgZngQwBAgEwPwYLKwYBBAGC3xMBAQEwMDAuBggrBgEFBQcC -ARYiaHR0cDovL2Nwcy5yb290LXgxLmxldHNlbmNyeXB0Lm9yZzA8BgNVHR8ENTAz -MDGgL6AthitodHRwOi8vY3JsLmlkZW50cnVzdC5jb20vRFNUUk9PVENBWDNDUkwu -Y3JsMB0GA1UdDgQWBBSoSmpjBH3duubRObemRWXv86jsoTANBgkqhkiG9w0BAQsF -AAOCAQEA3TPXEfNjWDjdGBX7CVW+dla5cEilaUcne8IkCJLxWh9KEik3JHRRHGJo -uM2VcGfl96S8TihRzZvoroed6ti6WqEBmtzw3Wodatg+VyOeph4EYpr/1wXKtx8/ -wApIvJSwtmVi4MFU5aMqrSDE6ea73Mj2tcMyo5jMd6jmeWUHK8so/joWUoHOUgwu -X4Po1QYz+3dszkDqMp4fklxBwXRsW10KXzPMTZ+sOPAveyxindmjkW8lGy+QsRlG -PfZ+G6Z6h7mjem0Y+iWlkYcV4PIWL1iwBi8saCbGS5jN2p8M+X+Q7UNKEkROb3N6 -KOqkqm57TH2H3eDJAkSnh6/DNFu0Qg== ------END CERTIFICATE----- -` - rsaPrivateKeyFile = "fixtures/key.pem" - certificateFile = "fixtures/cert.pem" - multiCertificateFile = "fixtures/multi.pem" - rsaEncryptedPrivateKeyFile = "fixtures/encrypted_key.pem" - certificateOfEncryptedKeyFile = "fixtures/cert_of_encrypted_key.pem" -) - -// returns the name of a pre-generated, multiple-certificate CA file -// with both RSA and ECDSA certs. -func getMultiCert() string { - return multiCertificateFile -} - -// returns the names of pre-generated key and certificate files. -func getCertAndKey() (string, string) { - return rsaPrivateKeyFile, certificateFile -} - -// returns the names of pre-generated, encrypted private key and -// corresponding certificate file -func getCertAndEncryptedKey() (string, string) { - return rsaEncryptedPrivateKeyFile, certificateOfEncryptedKeyFile -} - -// If the cert files and directory are provided but are invalid, an error is -// returned. -func TestConfigServerTLSFailsIfUnableToLoadCerts(t *testing.T) { - key, cert := getCertAndKey() - ca := getMultiCert() - - tempFile, err := ioutil.TempFile("", "cert-test") - if err != nil { - t.Fatal("Unable to create temporary empty file") - } - defer os.RemoveAll(tempFile.Name()) - tempFile.Close() - - for _, badFile := range []string{"not-a-file", tempFile.Name()} { - for i := 0; i < 3; i++ { - files := []string{cert, key, ca} - files[i] = badFile - - result, err := Server(Options{ - CertFile: files[0], - KeyFile: files[1], - CAFile: files[2], - ClientAuth: tls.VerifyClientCertIfGiven, - }) - if err == nil || result != nil { - t.Fatal("Expected a non-real file to error and return a nil TLS config") - } - } - } -} - -// If server cert and key are provided and client auth and client CA are not -// set, a tls config with only the server certs will be returned. 
-func TestConfigServerTLSServerCertsOnly(t *testing.T) { - key, cert := getCertAndKey() - - keypair, err := tls.LoadX509KeyPair(cert, key) - if err != nil { - t.Fatal("Unable to load the generated cert and key") - } - - tlsConfig, err := Server(Options{ - CertFile: cert, - KeyFile: key, - }) - if err != nil || tlsConfig == nil { - t.Fatal("Unable to configure server TLS", err) - } - - if len(tlsConfig.Certificates) != 1 { - t.Fatal("Unexpected server certificates") - } - if len(tlsConfig.Certificates[0].Certificate) != len(keypair.Certificate) { - t.Fatal("Unexpected server certificates") - } - for i, cert := range tlsConfig.Certificates[0].Certificate { - if !bytes.Equal(cert, keypair.Certificate[i]) { - t.Fatal("Unexpected server certificates") - } - } - - if !reflect.DeepEqual(tlsConfig.CipherSuites, DefaultServerAcceptedCiphers) { - t.Fatal("Unexpected server cipher suites") - } - if !tlsConfig.PreferServerCipherSuites { - t.Fatal("Expected server to prefer cipher suites") - } - if tlsConfig.MinVersion != tls.VersionTLS10 { - t.Fatal("Unexpected server TLS version") - } -} - -// If client CA is provided, it will only be used if the client auth is >= -// VerifyClientCertIfGiven -func TestConfigServerTLSClientCANotSetIfClientAuthTooLow(t *testing.T) { - key, cert := getCertAndKey() - ca := getMultiCert() - - tlsConfig, err := Server(Options{ - CertFile: cert, - KeyFile: key, - ClientAuth: tls.RequestClientCert, - CAFile: ca, - }) - - if err != nil || tlsConfig == nil { - t.Fatal("Unable to configure server TLS", err) - } - - if len(tlsConfig.Certificates) != 1 { - t.Fatal("Unexpected server certificates") - } - if tlsConfig.ClientAuth != tls.RequestClientCert { - t.Fatal("ClientAuth was not set to what was in the options") - } - if tlsConfig.ClientCAs != nil { - t.Fatalf("Client CAs should never have been set") - } -} - -// If client CA is provided, it will only be used if the client auth is >= -// VerifyClientCertIfGiven -func TestConfigServerTLSClientCASet(t *testing.T) { - key, cert := getCertAndKey() - ca := getMultiCert() - - tlsConfig, err := Server(Options{ - CertFile: cert, - KeyFile: key, - ClientAuth: tls.VerifyClientCertIfGiven, - CAFile: ca, - }) - - if err != nil || tlsConfig == nil { - t.Fatal("Unable to configure server TLS", err) - } - - if len(tlsConfig.Certificates) != 1 { - t.Fatal("Unexpected server certificates") - } - if tlsConfig.ClientAuth != tls.VerifyClientCertIfGiven { - t.Fatal("ClientAuth was not set to what was in the options") - } - basePool, err := SystemCertPool() - if err != nil { - basePool = x509.NewCertPool() - } - // because we are not enabling `ExclusiveRootPools`, any root pool will also contain the system roots - if tlsConfig.ClientCAs == nil || len(tlsConfig.ClientCAs.Subjects()) != len(basePool.Subjects())+2 { - t.Fatalf("Client CAs were never set correctly") - } -} - -// Exclusive root pools determines whether the CA pool will be a union of the system -// certificate pool and custom certs, or an exclusive or of the custom certs and system pool -func TestConfigServerExclusiveRootPools(t *testing.T) { - key, cert := getCertAndKey() - ca := getMultiCert() - - caBytes, err := ioutil.ReadFile(ca) - if err != nil { - t.Fatal("Unable to read CA certs", err) - } - - var testCerts []*x509.Certificate - for _, pemBytes := range [][]byte{caBytes, []byte(systemRootTrustedCert)} { - pemBlock, _ := pem.Decode(pemBytes) - if pemBlock == nil { - t.Fatal("Malformed certificate") - } - cert, err := x509.ParseCertificate(pemBlock.Bytes) - if err != nil { - 
t.Fatal("Unable to parse certificate") - } - testCerts = append(testCerts, cert) - } - - // ExclusiveRootPools not set, so should be able to verify both system-signed certs - // and custom CA-signed certs - tlsConfig, err := Server(Options{ - CertFile: cert, - KeyFile: key, - ClientAuth: tls.VerifyClientCertIfGiven, - CAFile: ca, - }) - - if err != nil || tlsConfig == nil { - t.Fatal("Unable to configure server TLS", err) - } - - for i, cert := range testCerts { - if _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.ClientCAs}); err != nil { - t.Fatalf("Unable to verify certificate %d: %v", i, err) - } - } - - // ExclusiveRootPools set and custom CA provided, so system certs should not be verifiable - // and custom CA-signed certs should be verifiable - tlsConfig, err = Server(Options{ - CertFile: cert, - KeyFile: key, - ClientAuth: tls.VerifyClientCertIfGiven, - CAFile: ca, - ExclusiveRootPools: true, - }) - - if err != nil || tlsConfig == nil { - t.Fatal("Unable to configure server TLS", err) - } - - for i, cert := range testCerts { - _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.ClientCAs}) - switch { - case i == 0 && err != nil: - t.Fatal("Unable to verify custom certificate, even though the root pool should have only the custom CA", err) - case i == 1 && err == nil: - t.Fatal("Successfully verified system root-signed certificate though the root pool should have only the cusotm CA", err) - } - } - - // No CA file provided, system cert should be verifiable only - tlsConfig, err = Server(Options{ - CertFile: cert, - KeyFile: key, - }) - - if err != nil || tlsConfig == nil { - t.Fatal("Unable to configure server TLS", err) - } - - for i, cert := range testCerts { - _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.ClientCAs}) - switch { - case i == 1 && err != nil: - t.Fatal("Unable to verify system root-signed certificate, even though the root pool should be the system pool only", err) - case i == 0 && err == nil: - t.Fatal("Successfully verified custom certificate though the root pool should be the system pool only", err) - } - } -} - -// If a valid minimum version is specified in the options, the server's -// minimum version should be set accordingly -func TestConfigServerTLSMinVersionIsSetBasedOnOptions(t *testing.T) { - versions := []uint16{ - tls.VersionTLS11, - tls.VersionTLS12, - } - key, cert := getCertAndKey() - - for _, v := range versions { - tlsConfig, err := Server(Options{ - MinVersion: v, - CertFile: cert, - KeyFile: key, - }) - - if err != nil || tlsConfig == nil { - t.Fatal("Unable to configure server TLS", err) - } - - if tlsConfig.MinVersion != v { - t.Fatal("Unexpected minimum TLS version: ", tlsConfig.MinVersion) - } - } -} - -// An error should be returned if the specified minimum version for the server -// is too low, i.e. 
less than VersionTLS10 -func TestConfigServerTLSMinVersionNotSetIfMinVersionIsTooLow(t *testing.T) { - key, cert := getCertAndKey() - - _, err := Server(Options{ - MinVersion: tls.VersionSSL30, - CertFile: cert, - KeyFile: key, - }) - - if err == nil { - t.Fatal("Should have returned an error for minimum version below TLS10") - } -} - -// An error should be returned if an invalid minimum version for the server is -// in the options struct -func TestConfigServerTLSMinVersionNotSetIfMinVersionIsInvalid(t *testing.T) { - key, cert := getCertAndKey() - - _, err := Server(Options{ - MinVersion: 1, - CertFile: cert, - KeyFile: key, - }) - - if err == nil { - t.Fatal("Should have returned error on invalid minimum version option") - } -} - -// The root CA is never set if InsecureSkipBoolean is set to true, but the -// default client options are set -func TestConfigClientTLSNoVerify(t *testing.T) { - ca := getMultiCert() - - tlsConfig, err := Client(Options{CAFile: ca, InsecureSkipVerify: true}) - - if err != nil || tlsConfig == nil { - t.Fatal("Unable to configure client TLS", err) - } - - if tlsConfig.RootCAs != nil { - t.Fatal("Should not have set Root CAs", err) - } - - if !reflect.DeepEqual(tlsConfig.CipherSuites, clientCipherSuites) { - t.Fatal("Unexpected client cipher suites") - } - if tlsConfig.MinVersion != tls.VersionTLS12 { - t.Fatal("Unexpected client TLS version") - } - - if tlsConfig.Certificates != nil { - t.Fatal("Somehow client certificates were set") - } -} - -// The root CA is never set if InsecureSkipBoolean is set to false and root CA -// is not provided. -func TestConfigClientTLSNoRoot(t *testing.T) { - tlsConfig, err := Client(Options{}) - - if err != nil || tlsConfig == nil { - t.Fatal("Unable to configure client TLS", err) - } - - if tlsConfig.RootCAs != nil { - t.Fatal("Should not have set Root CAs", err) - } - - if !reflect.DeepEqual(tlsConfig.CipherSuites, clientCipherSuites) { - t.Fatal("Unexpected client cipher suites") - } - if tlsConfig.MinVersion != tls.VersionTLS12 { - t.Fatal("Unexpected client TLS version") - } - - if tlsConfig.Certificates != nil { - t.Fatal("Somehow client certificates were set") - } -} - -// The RootCA is set if the file is provided and InsecureSkipVerify is false -func TestConfigClientTLSRootCAFileWithOneCert(t *testing.T) { - ca := getMultiCert() - - tlsConfig, err := Client(Options{CAFile: ca}) - - if err != nil || tlsConfig == nil { - t.Fatal("Unable to configure client TLS", err) - } - basePool, err := SystemCertPool() - if err != nil { - basePool = x509.NewCertPool() - } - // because we are not enabling `ExclusiveRootPools`, any root pool will also contain the system roots - if tlsConfig.RootCAs == nil || len(tlsConfig.RootCAs.Subjects()) != len(basePool.Subjects())+2 { - t.Fatal("Root CAs not set properly", err) - } - if tlsConfig.Certificates != nil { - t.Fatal("Somehow client certificates were set") - } -} - -// An error is returned if a root CA is provided but the file doesn't exist. -func TestConfigClientTLSNonexistentRootCAFile(t *testing.T) { - tlsConfig, err := Client(Options{CAFile: "nonexistent"}) - - if err == nil || tlsConfig != nil { - t.Fatal("Should not have been able to configure client TLS", err) - } -} - -// An error is returned if either the client cert or the key are provided -// but invalid or blank. 
-func TestConfigClientTLSClientCertOrKeyInvalid(t *testing.T) { - key, cert := getCertAndKey() - - tempFile, err := ioutil.TempFile("", "cert-test") - if err != nil { - t.Fatal("Unable to create temporary empty file") - } - defer os.Remove(tempFile.Name()) - tempFile.Close() - - for i := 0; i < 2; i++ { - for _, invalid := range []string{"not-a-file", "", tempFile.Name()} { - files := []string{cert, key} - files[i] = invalid - - tlsConfig, err := Client(Options{CertFile: files[0], KeyFile: files[1]}) - if err == nil || tlsConfig != nil { - t.Fatal("Should not have been able to configure client TLS", err) - } - } - } -} - -// The certificate is set if the client cert and client key are provided and -// valid. -func TestConfigClientTLSValidClientCertAndKey(t *testing.T) { - key, cert := getCertAndKey() - - keypair, err := tls.LoadX509KeyPair(cert, key) - if err != nil { - t.Fatal("Unable to load the generated cert and key") - } - - tlsConfig, err := Client(Options{CertFile: cert, KeyFile: key}) - - if err != nil || tlsConfig == nil { - t.Fatal("Unable to configure client TLS", err) - } - - if len(tlsConfig.Certificates) != 1 { - t.Fatal("Unexpected client certificates") - } - if len(tlsConfig.Certificates[0].Certificate) != len(keypair.Certificate) { - t.Fatal("Unexpected client certificates") - } - for i, cert := range tlsConfig.Certificates[0].Certificate { - if !bytes.Equal(cert, keypair.Certificate[i]) { - t.Fatal("Unexpected client certificates") - } - } - - if tlsConfig.RootCAs != nil { - t.Fatal("Root CAs should not have been set", err) - } -} - -// The certificate is set if the client cert and encrypted client key are -// provided and valid and passphrase can decrypt the key -func TestConfigClientTLSValidClientCertAndEncryptedKey(t *testing.T) { - key, cert := getCertAndEncryptedKey() - - tlsConfig, err := Client(Options{ - CertFile: cert, - KeyFile: key, - Passphrase: "FooBar123", - }) - - if err != nil || tlsConfig == nil { - t.Fatal("Unable to configure client TLS", err) - } - - if len(tlsConfig.Certificates) != 1 { - t.Fatal("Unexpected client certificates") - } -} - -// The certificate is not set if the provided passphrase cannot decrypt -// the encrypted key. 
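The deleted config_test.go around this point exercises `Server` and `Client` with the `Options` struct (cert/key/CA files, client-auth policy, key passphrase, `ExclusiveRootPools`). A small sketch, assuming local fixture paths like the ones named in the tests, of how those entry points are typically called:

```go
package main

import (
	"crypto/tls"
	"log"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	// Server side: certificate, key and a client CA, verifying client certs when given.
	serverCfg, err := tlsconfig.Server(tlsconfig.Options{
		CertFile:   "fixtures/cert.pem", // hypothetical local paths
		KeyFile:    "fixtures/key.pem",
		CAFile:     "fixtures/multi.pem",
		ClientAuth: tls.VerifyClientCertIfGiven,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Client side: an encrypted key unlocked with a passphrase; ExclusiveRootPools
	// keeps the system roots out of the resulting CA pool.
	clientCfg, err := tlsconfig.Client(tlsconfig.Options{
		CertFile:           "fixtures/cert_of_encrypted_key.pem",
		KeyFile:            "fixtures/encrypted_key.pem",
		Passphrase:         "FooBar123",
		CAFile:             "fixtures/multi.pem",
		ExclusiveRootPools: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	_, _ = serverCfg, clientCfg
}
```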
-func TestConfigClientTLSNotSetWithInvalidPassphrase(t *testing.T) { - key, cert := getCertAndEncryptedKey() - - tlsConfig, err := Client(Options{ - CertFile: cert, - KeyFile: key, - Passphrase: "InvalidPassphrase", - }) - - if !IsErrEncryptedKey(err) || tlsConfig != nil { - t.Fatal("Expected failure due to incorrect passphrase.") - } -} - -// Exclusive root pools determines whether the CA pool will be a union of the system -// certificate pool and custom certs, or an exclusive or of the custom certs and system pool -func TestConfigClientExclusiveRootPools(t *testing.T) { - ca := getMultiCert() - - caBytes, err := ioutil.ReadFile(ca) - if err != nil { - t.Fatal("Unable to read CA certs", err) - } - - var testCerts []*x509.Certificate - for _, pemBytes := range [][]byte{caBytes, []byte(systemRootTrustedCert)} { - pemBlock, _ := pem.Decode(pemBytes) - if pemBlock == nil { - t.Fatal("Malformed certificate") - } - cert, err := x509.ParseCertificate(pemBlock.Bytes) - if err != nil { - t.Fatal("Unable to parse certificate") - } - testCerts = append(testCerts, cert) - } - - // ExclusiveRootPools not set, so should be able to verify both system-signed certs - // and custom CA-signed certs - tlsConfig, err := Client(Options{CAFile: ca}) - - if err != nil || tlsConfig == nil { - t.Fatal("Unable to configure client TLS", err) - } - - for i, cert := range testCerts { - if _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.RootCAs}); err != nil { - t.Fatalf("Unable to verify certificate %d: %v", i, err) - } - } - - // ExclusiveRootPools set and custom CA provided, so system certs should not be verifiable - // and custom CA-signed certs should be verifiable - tlsConfig, err = Client(Options{ - CAFile: ca, - ExclusiveRootPools: true, - }) - - if err != nil || tlsConfig == nil { - t.Fatal("Unable to configure client TLS", err) - } - - for i, cert := range testCerts { - _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.RootCAs}) - switch { - case i == 0 && err != nil: - t.Fatal("Unable to verify custom certificate, even though the root pool should have only the custom CA", err) - case i == 1 && err == nil: - t.Fatal("Successfully verified system root-signed certificate though the root pool should have only the cusotm CA", err) - } - } - - // No CA file provided, system cert should be verifiable only - tlsConfig, err = Client(Options{}) - - if err != nil || tlsConfig == nil { - t.Fatal("Unable to configure client TLS", err) - } - - for i, cert := range testCerts { - _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.RootCAs}) - switch { - case i == 1 && err != nil: - t.Fatal("Unable to verify system root-signed certificate, even though the root pool should be the system pool only", err) - case i == 0 && err == nil: - t.Fatal("Successfully verified custom certificate though the root pool should be the system pool only", err) - } - } -} - -// If a valid MinVersion is specified in the options, the client's -// minimum version should be set accordingly -func TestConfigClientTLSMinVersionIsSetBasedOnOptions(t *testing.T) { - key, cert := getCertAndKey() - - tlsConfig, err := Client(Options{ - MinVersion: tls.VersionTLS12, - CertFile: cert, - KeyFile: key, - }) - - if err != nil || tlsConfig == nil { - t.Fatal("Unable to configure client TLS", err) - } - - if tlsConfig.MinVersion != tls.VersionTLS12 { - t.Fatal("Unexpected minimum TLS version: ", tlsConfig.MinVersion) - } -} - -// An error should be returned if the specified minimum version for the client -// is too low, i.e. 
less than VersionTLS12 -func TestConfigClientTLSMinVersionNotSetIfMinVersionIsTooLow(t *testing.T) { - key, cert := getCertAndKey() - - _, err := Client(Options{ - MinVersion: tls.VersionTLS11, - CertFile: cert, - KeyFile: key, - }) - - if err == nil { - t.Fatal("Should have returned an error for minimum version below TLS12") - } -} - -// An error should be returned if an invalid minimum version for the client is -// in the options struct -func TestConfigClientTLSMinVersionNotSetIfMinVersionIsInvalid(t *testing.T) { - key, cert := getCertAndKey() - - _, err := Client(Options{ - MinVersion: 1, - CertFile: cert, - KeyFile: key, - }) - - if err == nil { - t.Fatal("Should have returned error on invalid minimum version option") - } -} diff --git a/vendor/github.com/docker/go-units/BUILD.bazel b/vendor/github.com/docker/go-units/BUILD.bazel deleted file mode 100644 index 370932d1..00000000 --- a/vendor/github.com/docker/go-units/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "duration.go", - "size.go", - "ulimit.go", - ], - importpath = "github.com/docker/go-units", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = [ - "duration_test.go", - "size_test.go", - "ulimit_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/docker/go-units", -) diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md deleted file mode 100644 index 9ea86d78..00000000 --- a/vendor/github.com/docker/go-units/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# Contributing to go-units - -Want to hack on go-units? Awesome! Here are instructions to get you started. - -go-units is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. 
- -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS deleted file mode 100644 index 9b3b6b10..00000000 --- a/vendor/github.com/docker/go-units/MAINTAINERS +++ /dev/null @@ -1,46 +0,0 @@ -# go-units maintainers file -# -# This file describes who runs the docker/go-units project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "akihirosuda", - "dnephin", - "thajeztah", - "vdemeester", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. - - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.akihirosuda] - Name = "Akihiro Suda" - Email = "suda.akihiro@lab.ntt.co.jp" - GitHub = "AkihiroSuda" - - [people.dnephin] - Name = "Daniel Nephin" - Email = "dnephin@gmail.com" - GitHub = "dnephin" - - [people.thajeztah] - Name = "Sebastiaan van Stijn" - Email = "github@gone.nl" - GitHub = "thaJeztah" - - [people.vdemeester] - Name = "Vincent Demeester" - Email = "vincent@sbr.pm" - GitHub = "vdemeester" \ No newline at end of file diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md deleted file mode 100644 index 4f70a4e1..00000000 --- a/vendor/github.com/docker/go-units/README.md +++ /dev/null @@ -1,16 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) - -# Introduction - -go-units is a library to transform human friendly measurements into machine friendly values. - -## Usage - -See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. - -## Copyright and license - -Copyright © 2015 Docker, Inc. - -go-units is licensed under the Apache License, Version 2.0. 
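The go-units README removed here describes conversions between human-friendly strings and machine values, and the deleted example and test files below pin down the exact behavior. A brief usage sketch, with expected values taken from those tests:

```go
package main

import (
	"fmt"
	"time"

	units "github.com/docker/go-units"
)

func main() {
	fmt.Println(units.HumanDuration(1 * time.Minute)) // "About a minute"
	fmt.Println(units.BytesSize(2 * units.MiB))       // "2MiB" (binary units)
	fmt.Println(units.HumanSize(1000000))             // "1MB"  (decimal units)

	n, _ := units.FromHumanSize("32kb") // 32 * KB = 32000
	m, _ := units.RAMInBytes("32Kb")    // 32 * KiB = 32768
	fmt.Println(n, m)

	// Ulimit strings parse as name=soft:hard.
	u, _ := units.ParseUlimit("nofile=512:1024")
	fmt.Println(u.Name, u.Soft, u.Hard) // nofile 512 1024
}
```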
-See [LICENSE](LICENSE) for the full text of the license. diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml deleted file mode 100644 index 9043b354..00000000 --- a/vendor/github.com/docker/go-units/circle.yml +++ /dev/null @@ -1,11 +0,0 @@ -dependencies: - post: - # install golint - - go get github.com/golang/lint/golint - -test: - pre: - # run analysis before tests - - go vet ./... - - test -z "$(golint ./... | tee /dev/stderr)" - - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/docker/go-units/duration_test.go b/vendor/github.com/docker/go-units/duration_test.go deleted file mode 100644 index e436c383..00000000 --- a/vendor/github.com/docker/go-units/duration_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package units - -import ( - "fmt" - "testing" - "time" -) - -func ExampleHumanDuration() { - fmt.Println(HumanDuration(450 * time.Millisecond)) - fmt.Println(HumanDuration(47 * time.Second)) - fmt.Println(HumanDuration(1 * time.Minute)) - fmt.Println(HumanDuration(3 * time.Minute)) - fmt.Println(HumanDuration(35 * time.Minute)) - fmt.Println(HumanDuration(35*time.Minute + 40*time.Second)) - fmt.Println(HumanDuration(1 * time.Hour)) - fmt.Println(HumanDuration(1*time.Hour + 45*time.Minute)) - fmt.Println(HumanDuration(3 * time.Hour)) - fmt.Println(HumanDuration(3*time.Hour + 59*time.Minute)) - fmt.Println(HumanDuration(3*time.Hour + 60*time.Minute)) - fmt.Println(HumanDuration(24 * time.Hour)) - fmt.Println(HumanDuration(24*time.Hour + 12*time.Hour)) - fmt.Println(HumanDuration(2 * 24 * time.Hour)) - fmt.Println(HumanDuration(7 * 24 * time.Hour)) - fmt.Println(HumanDuration(13*24*time.Hour + 5*time.Hour)) - fmt.Println(HumanDuration(2 * 7 * 24 * time.Hour)) - fmt.Println(HumanDuration(2*7*24*time.Hour + 4*24*time.Hour)) - fmt.Println(HumanDuration(3 * 7 * 24 * time.Hour)) - fmt.Println(HumanDuration(4 * 7 * 24 * time.Hour)) - fmt.Println(HumanDuration(4*7*24*time.Hour + 3*24*time.Hour)) - fmt.Println(HumanDuration(1 * 30 * 24 * time.Hour)) - fmt.Println(HumanDuration(1*30*24*time.Hour + 2*7*24*time.Hour)) - fmt.Println(HumanDuration(2 * 30 * 24 * time.Hour)) - fmt.Println(HumanDuration(3*30*24*time.Hour + 1*7*24*time.Hour)) - fmt.Println(HumanDuration(5*30*24*time.Hour + 2*7*24*time.Hour)) - fmt.Println(HumanDuration(13 * 30 * 24 * time.Hour)) - fmt.Println(HumanDuration(23 * 30 * 24 * time.Hour)) - fmt.Println(HumanDuration(24 * 30 * 24 * time.Hour)) - fmt.Println(HumanDuration(24*30*24*time.Hour + 2*7*24*time.Hour)) - fmt.Println(HumanDuration(3*365*24*time.Hour + 2*30*24*time.Hour)) -} - -func TestHumanDuration(t *testing.T) { - // Useful duration abstractions - day := 24 * time.Hour - week := 7 * day - month := 30 * day - year := 365 * day - - assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond)) - assertEquals(t, "1 second", HumanDuration(1*time.Second)) - assertEquals(t, "45 seconds", HumanDuration(45*time.Second)) - assertEquals(t, "46 seconds", HumanDuration(46*time.Second)) - assertEquals(t, "59 seconds", HumanDuration(59*time.Second)) - assertEquals(t, "About a minute", HumanDuration(60*time.Second)) - assertEquals(t, "About a minute", HumanDuration(1*time.Minute)) - assertEquals(t, "3 minutes", HumanDuration(3*time.Minute)) - assertEquals(t, "35 minutes", HumanDuration(35*time.Minute)) - assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second)) - assertEquals(t, "45 minutes", HumanDuration(45*time.Minute)) - assertEquals(t, "45 minutes", 
HumanDuration(45*time.Minute+40*time.Second)) - assertEquals(t, "About an hour", HumanDuration(46*time.Minute)) - assertEquals(t, "About an hour", HumanDuration(59*time.Minute)) - assertEquals(t, "About an hour", HumanDuration(1*time.Hour)) - assertEquals(t, "About an hour", HumanDuration(1*time.Hour+29*time.Minute)) - assertEquals(t, "2 hours", HumanDuration(1*time.Hour+31*time.Minute)) - assertEquals(t, "2 hours", HumanDuration(1*time.Hour+59*time.Minute)) - assertEquals(t, "3 hours", HumanDuration(3*time.Hour)) - assertEquals(t, "3 hours", HumanDuration(3*time.Hour+29*time.Minute)) - assertEquals(t, "4 hours", HumanDuration(3*time.Hour+31*time.Minute)) - assertEquals(t, "4 hours", HumanDuration(3*time.Hour+59*time.Minute)) - assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute)) - assertEquals(t, "24 hours", HumanDuration(24*time.Hour)) - assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour)) - assertEquals(t, "2 days", HumanDuration(2*day)) - assertEquals(t, "7 days", HumanDuration(7*day)) - assertEquals(t, "13 days", HumanDuration(13*day+5*time.Hour)) - assertEquals(t, "2 weeks", HumanDuration(2*week)) - assertEquals(t, "2 weeks", HumanDuration(2*week+4*day)) - assertEquals(t, "3 weeks", HumanDuration(3*week)) - assertEquals(t, "4 weeks", HumanDuration(4*week)) - assertEquals(t, "4 weeks", HumanDuration(4*week+3*day)) - assertEquals(t, "4 weeks", HumanDuration(1*month)) - assertEquals(t, "6 weeks", HumanDuration(1*month+2*week)) - assertEquals(t, "2 months", HumanDuration(2*month)) - assertEquals(t, "2 months", HumanDuration(2*month+2*week)) - assertEquals(t, "3 months", HumanDuration(3*month)) - assertEquals(t, "3 months", HumanDuration(3*month+1*week)) - assertEquals(t, "5 months", HumanDuration(5*month+2*week)) - assertEquals(t, "13 months", HumanDuration(13*month)) - assertEquals(t, "23 months", HumanDuration(23*month)) - assertEquals(t, "24 months", HumanDuration(24*month)) - assertEquals(t, "2 years", HumanDuration(24*month+2*week)) - assertEquals(t, "3 years", HumanDuration(3*year+2*month)) -} diff --git a/vendor/github.com/docker/go-units/size_test.go b/vendor/github.com/docker/go-units/size_test.go deleted file mode 100644 index ab389ec5..00000000 --- a/vendor/github.com/docker/go-units/size_test.go +++ /dev/null @@ -1,167 +0,0 @@ -package units - -import ( - "fmt" - "reflect" - "runtime" - "strings" - "testing" -) - -func ExampleBytesSize() { - fmt.Println(BytesSize(1024)) - fmt.Println(BytesSize(1024 * 1024)) - fmt.Println(BytesSize(1048576)) - fmt.Println(BytesSize(2 * MiB)) - fmt.Println(BytesSize(3.42 * GiB)) - fmt.Println(BytesSize(5.372 * TiB)) - fmt.Println(BytesSize(2.22 * PiB)) -} - -func ExampleHumanSize() { - fmt.Println(HumanSize(1000)) - fmt.Println(HumanSize(1024)) - fmt.Println(HumanSize(1000000)) - fmt.Println(HumanSize(1048576)) - fmt.Println(HumanSize(2 * MB)) - fmt.Println(HumanSize(float64(3.42 * GB))) - fmt.Println(HumanSize(float64(5.372 * TB))) - fmt.Println(HumanSize(float64(2.22 * PB))) -} - -func ExampleFromHumanSize() { - fmt.Println(FromHumanSize("32")) - fmt.Println(FromHumanSize("32b")) - fmt.Println(FromHumanSize("32B")) - fmt.Println(FromHumanSize("32k")) - fmt.Println(FromHumanSize("32K")) - fmt.Println(FromHumanSize("32kb")) - fmt.Println(FromHumanSize("32Kb")) - fmt.Println(FromHumanSize("32Mb")) - fmt.Println(FromHumanSize("32Gb")) - fmt.Println(FromHumanSize("32Tb")) - fmt.Println(FromHumanSize("32Pb")) -} - -func ExampleRAMInBytes() { - fmt.Println(RAMInBytes("32")) - fmt.Println(RAMInBytes("32b")) - 
fmt.Println(RAMInBytes("32B")) - fmt.Println(RAMInBytes("32k")) - fmt.Println(RAMInBytes("32K")) - fmt.Println(RAMInBytes("32kb")) - fmt.Println(RAMInBytes("32Kb")) - fmt.Println(RAMInBytes("32Mb")) - fmt.Println(RAMInBytes("32Gb")) - fmt.Println(RAMInBytes("32Tb")) - fmt.Println(RAMInBytes("32Pb")) - fmt.Println(RAMInBytes("32PB")) - fmt.Println(RAMInBytes("32P")) -} - -func TestBytesSize(t *testing.T) { - assertEquals(t, "1KiB", BytesSize(1024)) - assertEquals(t, "1MiB", BytesSize(1024*1024)) - assertEquals(t, "1MiB", BytesSize(1048576)) - assertEquals(t, "2MiB", BytesSize(2*MiB)) - assertEquals(t, "3.42GiB", BytesSize(3.42*GiB)) - assertEquals(t, "5.372TiB", BytesSize(5.372*TiB)) - assertEquals(t, "2.22PiB", BytesSize(2.22*PiB)) - assertEquals(t, "1.049e+06YiB", BytesSize(KiB*KiB*KiB*KiB*KiB*PiB)) -} - -func TestHumanSize(t *testing.T) { - assertEquals(t, "1kB", HumanSize(1000)) - assertEquals(t, "1.024kB", HumanSize(1024)) - assertEquals(t, "1MB", HumanSize(1000000)) - assertEquals(t, "1.049MB", HumanSize(1048576)) - assertEquals(t, "2MB", HumanSize(2*MB)) - assertEquals(t, "3.42GB", HumanSize(float64(3.42*GB))) - assertEquals(t, "5.372TB", HumanSize(float64(5.372*TB))) - assertEquals(t, "2.22PB", HumanSize(float64(2.22*PB))) - assertEquals(t, "1e+04YB", HumanSize(float64(10000000000000*PB))) -} - -func TestFromHumanSize(t *testing.T) { - assertSuccessEquals(t, 32, FromHumanSize, "32") - assertSuccessEquals(t, 32, FromHumanSize, "32b") - assertSuccessEquals(t, 32, FromHumanSize, "32B") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32k") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32K") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb") - assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb") - assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb") - assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb") - assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb") - - assertSuccessEquals(t, 32.5*KB, FromHumanSize, "32.5kB") - assertSuccessEquals(t, 32.5*KB, FromHumanSize, "32.5 kB") - assertSuccessEquals(t, 32, FromHumanSize, "32.5 B") - - assertError(t, FromHumanSize, "") - assertError(t, FromHumanSize, "hello") - assertError(t, FromHumanSize, "-32") - assertError(t, FromHumanSize, ".3kB") - assertError(t, FromHumanSize, " 32 ") - assertError(t, FromHumanSize, "32m b") - assertError(t, FromHumanSize, "32bm") -} - -func TestRAMInBytes(t *testing.T) { - assertSuccessEquals(t, 32, RAMInBytes, "32") - assertSuccessEquals(t, 32, RAMInBytes, "32b") - assertSuccessEquals(t, 32, RAMInBytes, "32B") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kib") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32KIB") - assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb") - assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb") - assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P") - - assertSuccessEquals(t, 32, RAMInBytes, "32.3") - tmp := 32.3 * MiB - assertSuccessEquals(t, int64(tmp), RAMInBytes, "32.3 mb") - - assertError(t, RAMInBytes, "") - assertError(t, RAMInBytes, "hello") - assertError(t, RAMInBytes, "-32") - assertError(t, RAMInBytes, " 32 ") - assertError(t, RAMInBytes, "32m b") - 
assertError(t, RAMInBytes, "32bm") -} - -func assertEquals(t *testing.T, expected, actual interface{}) { - if expected != actual { - t.Errorf("Expected '%v' but got '%v'", expected, actual) - } -} - -// func that maps to the parse function signatures as testing abstraction -type parseFn func(string) (int64, error) - -// Define 'String()' for pretty-print -func (fn parseFn) String() string { - fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() - return fnName[strings.LastIndex(fnName, ".")+1:] -} - -func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) { - res, err := fn(arg) - if err != nil || res != expected { - t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err) - } -} - -func assertError(t *testing.T, fn parseFn, arg string) { - res, err := fn(arg) - if err == nil && res != -1 { - t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res) - } -} diff --git a/vendor/github.com/docker/go-units/ulimit_test.go b/vendor/github.com/docker/go-units/ulimit_test.go deleted file mode 100644 index 902e0236..00000000 --- a/vendor/github.com/docker/go-units/ulimit_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package units - -import ( - "fmt" - "strconv" - "testing" -) - -func ExampleParseUlimit() { - fmt.Println(ParseUlimit("nofile=512:1024")) - fmt.Println(ParseUlimit("nofile=1024")) - fmt.Println(ParseUlimit("cpu=2:4")) - fmt.Println(ParseUlimit("cpu=6")) -} - -func TestParseUlimitValid(t *testing.T) { - u1 := &Ulimit{"nofile", 1024, 512} - if u2, _ := ParseUlimit("nofile=512:1024"); *u1 != *u2 { - t.Fatalf("expected %q, but got %q", u1, u2) - } -} - -func TestParseUlimitInvalidLimitType(t *testing.T) { - if _, err := ParseUlimit("notarealtype=1024:1024"); err == nil { - t.Fatalf("expected error on invalid ulimit type") - } -} - -func TestParseUlimitBadFormat(t *testing.T) { - if _, err := ParseUlimit("nofile:1024:1024"); err == nil { - t.Fatal("expected error on bad syntax") - } - - if _, err := ParseUlimit("nofile"); err == nil { - t.Fatal("expected error on bad syntax") - } - - if _, err := ParseUlimit("nofile="); err == nil { - t.Fatal("expected error on bad syntax") - } - if _, err := ParseUlimit("nofile=:"); err == nil { - t.Fatal("expected error on bad syntax") - } - if _, err := ParseUlimit("nofile=:1024"); err == nil { - t.Fatal("expected error on bad syntax") - } -} - -func TestParseUlimitHardLessThanSoft(t *testing.T) { - if _, err := ParseUlimit("nofile=1024:1"); err == nil { - t.Fatal("expected error on hard limit less than soft limit") - } -} - -func TestParseUlimitInvalidValueType(t *testing.T) { - if _, err := ParseUlimit("nofile=asdf"); err == nil { - t.Fatal("expected error on bad value type, but got no error") - } else if _, ok := err.(*strconv.NumError); !ok { - t.Fatalf("expected error on bad value type, but got `%s`", err) - } - - if _, err := ParseUlimit("nofile=1024:asdf"); err == nil { - t.Fatal("expected error on bad value type, but got no error") - } else if _, ok := err.(*strconv.NumError); !ok { - t.Fatalf("expected error on bad value type, but got `%s`", err) - } -} - -func TestParseUlimitTooManyValueArgs(t *testing.T) { - if _, err := ParseUlimit("nofile=1024:1:50"); err == nil { - t.Fatalf("expected error on more than two value arguments") - } -} - -func TestUlimitStringOutput(t *testing.T) { - u := &Ulimit{"nofile", 1024, 512} - if s := u.String(); s != "nofile=512:1024" { - t.Fatal("expected String to return nofile=512:1024, but got", s) - } -} - -func TestGetRlimit(t 
*testing.T) { - tt := []struct { - ulimit Ulimit - rlimit Rlimit - }{ - {Ulimit{"core", 10, 12}, Rlimit{rlimitCore, 10, 12}}, - {Ulimit{"cpu", 1, 10}, Rlimit{rlimitCPU, 1, 10}}, - {Ulimit{"data", 5, 0}, Rlimit{rlimitData, 5, 0}}, - {Ulimit{"fsize", 2, 2}, Rlimit{rlimitFsize, 2, 2}}, - {Ulimit{"locks", 0, 0}, Rlimit{rlimitLocks, 0, 0}}, - {Ulimit{"memlock", 10, 10}, Rlimit{rlimitMemlock, 10, 10}}, - {Ulimit{"msgqueue", 9, 1}, Rlimit{rlimitMsgqueue, 9, 1}}, - {Ulimit{"nice", 9, 9}, Rlimit{rlimitNice, 9, 9}}, - {Ulimit{"nofile", 4, 100}, Rlimit{rlimitNofile, 4, 100}}, - {Ulimit{"nproc", 5, 5}, Rlimit{rlimitNproc, 5, 5}}, - {Ulimit{"rss", 0, 5}, Rlimit{rlimitRss, 0, 5}}, - {Ulimit{"rtprio", 100, 65}, Rlimit{rlimitRtprio, 100, 65}}, - {Ulimit{"rttime", 55, 102}, Rlimit{rlimitRttime, 55, 102}}, - {Ulimit{"sigpending", 14, 20}, Rlimit{rlimitSigpending, 14, 20}}, - {Ulimit{"stack", 1, 1}, Rlimit{rlimitStack, 1, 1}}, - } - - for _, te := range tt { - res, err := te.ulimit.GetRlimit() - if err != nil { - t.Errorf("expected not to fail: %s", err) - } - if res.Type != te.rlimit.Type { - t.Errorf("expected Type to be %d but got %d", - te.rlimit.Type, res.Type) - } - if res.Soft != te.rlimit.Soft { - t.Errorf("expected Soft to be %d but got %d", - te.rlimit.Soft, res.Soft) - } - if res.Hard != te.rlimit.Hard { - t.Errorf("expected Hard to be %d but got %d", - te.rlimit.Hard, res.Hard) - } - - } -} - -func TestGetRlimitBadUlimitName(t *testing.T) { - name := "bla" - uLimit := Ulimit{name, 0, 0} - if _, err := uLimit.GetRlimit(); err == nil { - t.Error("expected error on bad Ulimit name") - } -} diff --git a/vendor/github.com/docker/libnetwork/LICENSE b/vendor/github.com/docker/libnetwork/LICENSE new file mode 100644 index 00000000..e06d2081 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/docker/libnetwork/client/mflag/LICENSE b/vendor/github.com/docker/libnetwork/client/mflag/LICENSE new file mode 100644 index 00000000..9b4f4a29 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/client/mflag/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/libnetwork/ipamutils/utils.go b/vendor/github.com/docker/libnetwork/ipamutils/utils.go new file mode 100644 index 00000000..f8eca58e --- /dev/null +++ b/vendor/github.com/docker/libnetwork/ipamutils/utils.go @@ -0,0 +1,96 @@ +// Package ipamutils provides utility functions for ipam management +package ipamutils + +import ( + "fmt" + "net" + "sync" + + "github.com/sirupsen/logrus" +) + +var ( + // PredefinedBroadNetworks contains a list of 31 IPv4 private networks with host size 16 and 12 + // (172.17-31.x.x/16, 192.168.x.x/20) which do not overlap with the networks in `PredefinedGranularNetworks` + PredefinedBroadNetworks []*net.IPNet + // PredefinedGranularNetworks contains a list of 64K IPv4 private networks with host size 8 + // (10.x.x.x/24) which do not overlap with the networks in `PredefinedBroadNetworks` + PredefinedGranularNetworks []*net.IPNet + initNetworksOnce sync.Once + + defaultBroadNetwork = []*NetworkToSplit{{"172.17.0.0/16", 16}, {"172.18.0.0/16", 16}, {"172.19.0.0/16", 16}, + {"172.20.0.0/14", 16}, {"172.24.0.0/14", 16}, {"172.28.0.0/14", 16}, + {"192.168.0.0/16", 20}} + defaultGranularNetwork = []*NetworkToSplit{{"10.0.0.0/8", 24}} +) + +// NetworkToSplit represent a network that has to be split in chunks with mask length Size. +// Each subnet in the set is derived from the Base pool. Base is to be passed +// in CIDR format. 
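The ipamutils file added here (its diff continues just below) builds the predefined address pools from `NetworkToSplit` descriptions. A minimal sketch, not part of the vendored file, of how the exported helpers might be called:

```go
package main

import (
	"fmt"

	"github.com/docker/libnetwork/ipamutils"
)

func main() {
	// InitNetworks is guarded by a sync.Once, so only the first call takes effect.
	// A custom pool here splits 10.10.0.0/16 into /24 subnets; passing nil would
	// fall back to the built-in broad-network defaults.
	ipamutils.InitNetworks([]*ipamutils.NetworkToSplit{
		{Base: "10.10.0.0/16", Size: 24},
	})

	fmt.Println(len(ipamutils.PredefinedBroadNetworks))    // 256 /24 pools from the custom base
	fmt.Println(len(ipamutils.PredefinedGranularNetworks)) // 65536 /24 pools from 10.0.0.0/8
}
```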
+// Example: a Base "10.10.0.0/16 with Size 24 will define the set of 256 +// 10.10.[0-255].0/24 address pools +type NetworkToSplit struct { + Base string `json:"base"` + Size int `json:"size"` +} + +// InitNetworks initializes the broad network pool and the granular network pool +func InitNetworks(defaultAddressPool []*NetworkToSplit) { + initNetworksOnce.Do(func() { + // error ingnored should never fail + PredefinedGranularNetworks, _ = splitNetworks(defaultGranularNetwork) + if defaultAddressPool == nil { + defaultAddressPool = defaultBroadNetwork + } + var err error + if PredefinedBroadNetworks, err = splitNetworks(defaultAddressPool); err != nil { + logrus.WithError(err).Error("InitAddressPools failed to initialize the default address pool") + } + }) +} + +// splitNetworks takes a slice of networks, split them accordingly and returns them +func splitNetworks(list []*NetworkToSplit) ([]*net.IPNet, error) { + localPools := make([]*net.IPNet, 0, len(list)) + + for _, p := range list { + _, b, err := net.ParseCIDR(p.Base) + if err != nil { + return nil, fmt.Errorf("invalid base pool %q: %v", p.Base, err) + } + ones, _ := b.Mask.Size() + if p.Size <= 0 || p.Size < ones { + return nil, fmt.Errorf("invalid pools size: %d", p.Size) + } + localPools = append(localPools, splitNetwork(p.Size, b)...) + } + return localPools, nil +} + +func splitNetwork(size int, base *net.IPNet) []*net.IPNet { + one, bits := base.Mask.Size() + mask := net.CIDRMask(size, bits) + n := 1 << uint(size-one) + s := uint(bits - size) + list := make([]*net.IPNet, 0, n) + + for i := 0; i < n; i++ { + ip := copyIP(base.IP) + addIntToIP(ip, uint(i<<s)) + list = append(list, &net.IPNet{IP: ip, Mask: mask}) + } + return list +} + +func copyIP(from net.IP) net.IP { + ip := make(net.IP, len(from)) + copy(ip, from) + return ip +} + +func addIntToIP(array net.IP, ordinal uint) { + for i := len(array) - 1; i >= 0; i-- { + array[i] |= (byte)(ordinal & 0xff) + ordinal >>= 8 + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/.gitignore b/vendor/github.com/fsouza/go-dockerclient/.gitignore deleted file mode 100644 index ef22245e..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -# temporary symlink for testing -testing/data/symlink -Gopkg.lock -vendor/ diff --git a/vendor/github.com/fsouza/go-dockerclient/.travis.yml b/vendor/github.com/fsouza/go-dockerclient/.travis.yml deleted file mode 100644 index 7da0371a..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/.travis.yml +++ /dev/null @@ -1,30 +0,0 @@ -language: go -sudo: required -go: - - 1.9.x - - 1.10.x -os: - - linux - - osx -env: - matrix: - - GOARCH=amd64 DOCKER_PKG_VERSION=18.03.0~ce-0~ubuntu - - GOARCH=386 DOCKER_PKG_VERSION=18.03.0~ce-0~ubuntu - - GOARCH=amd64 DOCKER_PKG_VERSION=18.02.0~ce-0~ubuntu - - GOARCH=386 DOCKER_PKG_VERSION=18.02.0~ce-0~ubuntu - global: - - DOCKER_HOST=tcp://127.0.0.1:2375 -install: - - make testdeps - - travis_retry travis-scripts/install-docker.bash -script: - - travis-scripts/run-tests.bash -services: - - docker -matrix: - fast_finish: true - exclude: - - os: osx - env: GOARCH=amd64 DOCKER_PKG_VERSION=18.02.0~ce-0~ubuntu - - os: osx - env: GOARCH=386 DOCKER_PKG_VERSION=18.02.0~ce-0~ubuntu diff --git a/vendor/github.com/fsouza/go-dockerclient/AUTHORS b/vendor/github.com/fsouza/go-dockerclient/AUTHORS index f944f395..a76e3b17 100644 --- a/vendor/github.com/fsouza/go-dockerclient/AUTHORS +++ b/vendor/github.com/fsouza/go-dockerclient/AUTHORS @@ -51,6 +51,7 @@ Damon Wang Dan Williams Daniel, Dao Quang Minh Daniel Garcia +Daniel Hess Daniel Hiltgen Daniel Nephin Daniel Tsui @@ -79,6 +80,7 @@ Flavia Missi Florent Aide Francisco Souza Frank Groeneveld +George MacRorie George Moura Grégoire Delattre Guilherme Rezende @@ -141,6 +143,7 @@ Orivej
Desh Paul Bellamy Paul Morie Paul Weil +Peng Yin Peter Edge Peter Jihoon Kim Peter Teich @@ -184,6 +187,7 @@ Tonic ttyh061 upccup Victor Marmol +Vijay Krishnan Vincenzo Prignano Vlad Alexandru Ionescu Weitao Zhou diff --git a/vendor/github.com/fsouza/go-dockerclient/BUILD.bazel b/vendor/github.com/fsouza/go-dockerclient/BUILD.bazel deleted file mode 100644 index ecc76a60..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/BUILD.bazel +++ /dev/null @@ -1,183 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "auth.go", - "change.go", - "client.go", - "container.go", - "distribution.go", - "env.go", - "event.go", - "exec.go", - "image.go", - "misc.go", - "network.go", - "plugin.go", - "signal.go", - "swarm.go", - "swarm_configs.go", - "swarm_node.go", - "swarm_secrets.go", - "swarm_service.go", - "swarm_task.go", - "tar.go", - "tls.go", - "volume.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "client_unix.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "client_unix.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "client_unix.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "client_unix.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "client_unix.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "client_unix.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "client_unix.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "client_unix.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "client_unix.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "client_unix.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "client_windows.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/fsouza/go-dockerclient", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/docker/docker/api/types/registry:go_default_library", - "//vendor/github.com/docker/docker/api/types/swarm:go_default_library", - "//vendor/github.com/docker/docker/opts:go_default_library", - "//vendor/github.com/docker/docker/pkg/archive:go_default_library", - "//vendor/github.com/docker/docker/pkg/fileutils:go_default_library", - "//vendor/github.com/docker/docker/pkg/homedir:go_default_library", - "//vendor/github.com/docker/docker/pkg/jsonmessage:go_default_library", - "//vendor/github.com/docker/docker/pkg/stdcopy:go_default_library", - "//vendor/github.com/docker/go-units:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/Microsoft/go-winio:go_default_library", - ], - "//conditions:default": [], - }), -) - -go_test( - name = "go_default_test", - srcs = [ - "auth_test.go", - "build_test.go", - "change_test.go", - "client_test.go", - "container_test.go", - "distribution_test.go", - "env_test.go", - "event_test.go", - "exec_test.go", - "image_test.go", - "misc_test.go", - "network_test.go", - "plugins_test.go", - "swarm_configs_test.go", - "swarm_node_test.go", - "swarm_secrets_test.go", - "swarm_service_test.go", - "swarm_task_test.go", - "swarm_test.go", - "volume_test.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "client_stress_test.go", - "client_unix_test.go", - "container_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "client_stress_test.go", - "client_unix_test.go", - "container_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "client_stress_test.go", - "client_unix_test.go", - 
"container_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "client_stress_test.go", - "client_unix_test.go", - "container_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "client_stress_test.go", - "client_unix_test.go", - "container_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "client_stress_test.go", - "client_unix_test.go", - "container_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "client_stress_test.go", - "client_unix_test.go", - "container_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "client_stress_test.go", - "client_unix_test.go", - "container_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "client_stress_test.go", - "client_unix_test.go", - "container_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "client_stress_test.go", - "client_unix_test.go", - "container_unix_test.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "client_windows_test.go", - ], - "//conditions:default": [], - }), - embed = [":go_default_library"], - importpath = "github.com/fsouza/go-dockerclient", - deps = [ - "//vendor/github.com/docker/docker/api/types/registry:go_default_library", - "//vendor/github.com/docker/docker/api/types/swarm:go_default_library", - "//vendor/github.com/docker/docker/pkg/archive:go_default_library", - "//vendor/github.com/google/go-cmp/cmp:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/Microsoft/go-winio:go_default_library", - ], - "//conditions:default": [], - }), -) - -go_test( - name = "go_default_xtest", - srcs = ["example_test.go"], - importpath = "github.com/fsouza/go-dockerclient_test", - deps = [":go_default_library"], -) diff --git a/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE b/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE deleted file mode 100644 index 70663447..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE +++ /dev/null @@ -1,6 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -You can find the Docker license at the following link: -https://raw.githubusercontent.com/docker/docker/master/LICENSE diff --git a/vendor/github.com/fsouza/go-dockerclient/Gopkg.toml b/vendor/github.com/fsouza/go-dockerclient/Gopkg.toml deleted file mode 100644 index 95fdf86f..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/Gopkg.toml +++ /dev/null @@ -1,24 +0,0 @@ -[[constraint]] - name = "github.com/Microsoft/go-winio" - version = "v0.4.5" - -[[constraint]] - name = "github.com/docker/docker" - revision = "a422774e593b33bd287d9890544ad9e09b380d8c" - -[[constraint]] - name = "github.com/docker/go-units" - version = "v0.3.2" - -[[constraint]] - name = "github.com/google/go-cmp" - version = "v0.2.0" - -[[constraint]] - name = "github.com/gorilla/mux" - version = "v1.5.0" - -[[override]] - name = "github.com/Nvveen/Gotty" - source = "https://github.com/ijc25/Gotty.git" - revision = "a8b993ba6abdb0e0c12b0125c603323a71c7790c" diff --git a/vendor/github.com/fsouza/go-dockerclient/Makefile b/vendor/github.com/fsouza/go-dockerclient/Makefile deleted file mode 100644 index 479b07b0..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -.PHONY: \ - all \ - lint \ - vet \ - fmtcheck \ - pretest \ - test \ - integration - -all: test - -lint: - @ go get -v golang.org/x/lint/golint - [ -z "$$(golint . 
| grep -v 'type name will be used as docker.DockerInfo' | grep -v 'context.Context should be the first' | tee /dev/stderr)" ] - -vet: - go vet ./... - -fmtcheck: - [ -z "$$(gofmt -s -d *.go ./testing | tee /dev/stderr)" ] - -testdeps: - go get -u github.com/golang/dep/cmd/dep - dep ensure -v - -pretest: testdeps lint vet fmtcheck - -gotest: - go test -race ./... - -test: pretest gotest - -integration: - go test -tags docker_integration -run TestIntegration -v diff --git a/vendor/github.com/fsouza/go-dockerclient/README.markdown b/vendor/github.com/fsouza/go-dockerclient/README.markdown deleted file mode 100644 index 86824d6c..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/README.markdown +++ /dev/null @@ -1,133 +0,0 @@ -# go-dockerclient - -[![Travis Build Status](https://travis-ci.org/fsouza/go-dockerclient.svg?branch=master)](https://travis-ci.org/fsouza/go-dockerclient) -[![AppVeyor Build Status](https://ci.appveyor.com/api/projects/status/4m374pti06ubg2l7?svg=true)](https://ci.appveyor.com/project/fsouza/go-dockerclient) -[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/fsouza/go-dockerclient) - -This package presents a client for the Docker remote API. It also provides -support for the extensions in the [Swarm API](https://docs.docker.com/swarm/swarm-api/). - -This package also provides support for docker's network API, which is a simple -passthrough to the libnetwork remote API. Note that docker's network API is -only available in docker 1.8 and above, and only enabled in docker if -DOCKER_EXPERIMENTAL is defined during the docker build process. - -For more details, check the [remote API -documentation](http://docs.docker.com/engine/reference/api/docker_remote_api/). - -## Example - -```go -package main - -import ( - "fmt" - - "github.com/fsouza/go-dockerclient" -) - -func main() { - endpoint := "unix:///var/run/docker.sock" - client, err := docker.NewClient(endpoint) - if err != nil { - panic(err) - } - imgs, err := client.ListImages(docker.ListImagesOptions{All: false}) - if err != nil { - panic(err) - } - for _, img := range imgs { - fmt.Println("ID: ", img.ID) - fmt.Println("RepoTags: ", img.RepoTags) - fmt.Println("Created: ", img.Created) - fmt.Println("Size: ", img.Size) - fmt.Println("VirtualSize: ", img.VirtualSize) - fmt.Println("ParentId: ", img.ParentID) - } -} -``` - -## Using with TLS - -In order to instantiate the client for a TLS-enabled daemon, you should use -NewTLSClient, passing the endpoint and path for key and certificates as -parameters. - -```go -package main - -import ( - "fmt" - - "github.com/fsouza/go-dockerclient" -) - -func main() { - endpoint := "tcp://[ip]:[port]" - path := os.Getenv("DOCKER_CERT_PATH") - ca := fmt.Sprintf("%s/ca.pem", path) - cert := fmt.Sprintf("%s/cert.pem", path) - key := fmt.Sprintf("%s/key.pem", path) - client, _ := docker.NewTLSClient(endpoint, cert, key, ca) - // use client -} -``` - -If using [docker-machine](https://docs.docker.com/machine/), or another -application that exports environment variables `DOCKER_HOST`, -`DOCKER_TLS_VERIFY`, `DOCKER_CERT_PATH`, you can use NewClientFromEnv. - - -```go -package main - -import ( - "fmt" - - "github.com/fsouza/go-dockerclient" -) - -func main() { - client, _ := docker.NewClientFromEnv() - // use client -} -``` - -See the documentation for more details. - -## Developing - -All development commands can be seen in the [Makefile](Makefile). 
- -Commited code must pass: - -* [golint](https://github.com/golang/lint) (with some exceptions, see the Makefile). -* [go vet](https://golang.org/cmd/vet/) -* [gofmt](https://golang.org/cmd/gofmt) -* [go test](https://golang.org/cmd/go/#hdr-Test_packages) - -Running `make test` will check all of these. If your editor does not -automatically call ``gofmt -s``, `make fmt` will format all go files in this -repository. - -## Vendoring - -go-dockerclient uses [dep](https://github.com/golang/dep/) for vendoring. If -you're using dep, you should be able to pick go-dockerclient releases and get -the proper dependencies. - -With other vendoring tools, users might need to specify go-dockerclient's -dependencies manually. - -## Using with Docker 1.9 and Go 1.4 - -There's a tag for using go-dockerclient with Docker 1.9 (which requires -compiling go-dockerclient with Go 1.4), the tag name is ``docker-1.9/go-1.4``. - -The instructions below can be used to get a version of go-dockerclient that compiles with Go 1.4: - -``` -% git clone -b docker-1.9/go-1.4 https://github.com/fsouza/go-dockerclient.git $GOPATH/src/github.com/fsouza/go-dockerclient -% git clone -b v1.9.1 https://github.com/docker/docker.git $GOPATH/src/github.com/docker/docker -% go get github.com/fsouza/go-dockerclient -``` diff --git a/vendor/github.com/fsouza/go-dockerclient/appveyor.yml b/vendor/github.com/fsouza/go-dockerclient/appveyor.yml deleted file mode 100644 index ba7424c5..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/appveyor.yml +++ /dev/null @@ -1,21 +0,0 @@ -version: '{build}' -platform: x64 -clone_depth: 2 -clone_folder: c:\gopath\src\github.com\fsouza\go-dockerclient -environment: - GOPATH: c:\gopath - matrix: - - GOVERSION: 1.9.5 - - GOVERSION: 1.10.1 -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - rmdir c:\go /s /q - - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.zip - - 7z x go%GOVERSION%.windows-amd64.zip -y -oC:\ > NUL -build_script: - - go get -u github.com/golang/dep/cmd/dep - - dep ensure -v -test_script: - - for /f "" %%G in ('go list ./... ^| find /i /v "/vendor/"') do ( go test %%G & IF ERRORLEVEL == 1 EXIT 1) -matrix: - fast_finish: true diff --git a/vendor/github.com/fsouza/go-dockerclient/auth.go b/vendor/github.com/fsouza/go-dockerclient/auth.go index c58de867..4335d6e0 100644 --- a/vendor/github.com/fsouza/go-dockerclient/auth.go +++ b/vendor/github.com/fsouza/go-dockerclient/auth.go @@ -27,6 +27,14 @@ type AuthConfiguration struct { Password string `json:"password,omitempty"` Email string `json:"email,omitempty"` ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken can be supplied with the identitytoken response of the AuthCheck call + // see https://godoc.org/github.com/docker/docker/api/types#AuthConfig + // It can be used in place of password not in conjunction with it + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken can be supplied with the registrytoken + RegistryToken string `json:"registrytoken,omitempty"` } // AuthConfigurations represents authentication options to use for the @@ -42,8 +50,10 @@ type AuthConfigurations119 map[string]AuthConfiguration // dockerConfig represents a registry authentation configuration from the // .dockercfg file. 
type dockerConfig struct { - Auth string `json:"auth"` - Email string `json:"email"` + Auth string `json:"auth"` + Email string `json:"email"` + IdentityToken string `json:"identitytoken"` + RegistryToken string `json:"registrytoken"` } // NewAuthConfigurationsFromFile returns AuthConfigurations from a path containing JSON @@ -128,6 +138,7 @@ func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) { c := &AuthConfigurations{ Configs: make(map[string]AuthConfiguration), } + for reg, conf := range confs { if conf.Auth == "" { continue @@ -136,17 +147,33 @@ func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) { if err != nil { return nil, err } + userpass := strings.SplitN(string(data), ":", 2) if len(userpass) != 2 { return nil, ErrCannotParseDockercfg } - c.Configs[reg] = AuthConfiguration{ + + authConfig := AuthConfiguration{ Email: conf.Email, Username: userpass[0], Password: userpass[1], ServerAddress: reg, } + + // if identitytoken provided then zero the password and set it + if conf.IdentityToken != "" { + authConfig.Password = "" + authConfig.IdentityToken = conf.IdentityToken + } + + // if registrytoken provided then zero the password and set it + if conf.RegistryToken != "" { + authConfig.Password = "" + authConfig.RegistryToken = conf.RegistryToken + } + c.Configs[reg] = authConfig } + return c, nil } diff --git a/vendor/github.com/fsouza/go-dockerclient/auth_test.go b/vendor/github.com/fsouza/go-dockerclient/auth_test.go deleted file mode 100644 index 2f57aa1a..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/auth_test.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/base64" - "fmt" - "io/ioutil" - "net/http" - "os" - "path" - "strings" - "testing" -) - -func TestAuthConfigurationSearchPath(t *testing.T) { - t.Parallel() - var testData = []struct { - dockerConfigEnv string - homeEnv string - expectedPaths []string - }{ - {"", "", []string{}}, - {"", "home", []string{path.Join("home", ".docker", "config.json"), path.Join("home", ".dockercfg")}}, - {"docker_config", "", []string{path.Join("docker_config", "config.json")}}, - {"a", "b", []string{path.Join("a", "config.json"), path.Join("b", ".docker", "config.json"), path.Join("b", ".dockercfg")}}, - } - for _, tt := range testData { - paths := cfgPaths(tt.dockerConfigEnv, tt.homeEnv) - if got, want := strings.Join(paths, ","), strings.Join(tt.expectedPaths, ","); got != want { - t.Errorf("cfgPaths: wrong result. Want: %s. 
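The auth.go hunk above threads the new identitytoken and registrytoken fields from a Docker config file into AuthConfiguration, clearing the password whenever a token is present. A minimal sketch of that behaviour through the exported NewAuthConfigurations API; the registry name and token value are placeholders, not anything this diff prescribes:

```go
package main

import (
	"fmt"
	"strings"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	// "dXNlcjpwYXNz" is base64 for "user:pass"; registry name and token
	// are placeholders used for illustration only.
	cfg := `{"auths": {"registry.example.com": {
		"auth": "dXNlcjpwYXNz",
		"identitytoken": "a-placeholder-identity-token"
	}}}`
	auths, err := docker.NewAuthConfigurations(strings.NewReader(cfg))
	if err != nil {
		panic(err)
	}
	a := auths.Configs["registry.example.com"]
	// With the vendored change, Password is cleared and IdentityToken is set.
	fmt.Printf("user=%q password=%q identitytoken=%q\n",
		a.Username, a.Password, a.IdentityToken)
}
```

With the updated vendored code the printed password should be empty and the identity token populated, whereas the previous revision dropped the token entirely.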
Got: %s", want, got) - } - } -} - -func TestAuthConfigurationsFromFile(t *testing.T) { - t.Parallel() - tmpDir, err := ioutil.TempDir("", "go-dockerclient-auth-test") - if err != nil { - t.Errorf("Unable to create temporary directory for TestAuthConfigurationsFromFile: %s", err) - } - defer os.RemoveAll(tmpDir) - authString := base64.StdEncoding.EncodeToString([]byte("user:pass")) - content := fmt.Sprintf(`{"auths":{"foo": {"auth": "%s"}}}`, authString) - configFile := path.Join(tmpDir, "docker_config") - if err = ioutil.WriteFile(configFile, []byte(content), 0600); err != nil { - t.Errorf("Error writing auth config for TestAuthConfigurationsFromFile: %s", err) - } - auths, err := NewAuthConfigurationsFromFile(configFile) - if err != nil { - t.Errorf("Error calling NewAuthConfigurationsFromFile: %s", err) - } - if _, hasKey := auths.Configs["foo"]; !hasKey { - t.Errorf("Returned auths did not include expected auth key foo") - } -} - -func TestAuthLegacyConfig(t *testing.T) { - t.Parallel() - auth := base64.StdEncoding.EncodeToString([]byte("user:pa:ss")) - read := strings.NewReader(fmt.Sprintf(`{"docker.io":{"auth":"%s","email":"user@example.com"}}`, auth)) - ac, err := NewAuthConfigurations(read) - if err != nil { - t.Error(err) - } - c, ok := ac.Configs["docker.io"] - if !ok { - t.Error("NewAuthConfigurations: Expected Configs to contain docker.io") - } - if got, want := c.Email, "user@example.com"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Email: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.Username, "user"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Username: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.Password, "pa:ss"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Password: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.ServerAddress, "docker.io"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].ServerAddress: wrong result. Want %q. Got %q`, want, got) - } -} - -func TestAuthBadConfig(t *testing.T) { - t.Parallel() - auth := base64.StdEncoding.EncodeToString([]byte("userpass")) - read := strings.NewReader(fmt.Sprintf(`{"docker.io":{"auth":"%s","email":"user@example.com"}}`, auth)) - ac, err := NewAuthConfigurations(read) - if err != ErrCannotParseDockercfg { - t.Errorf("Incorrect error returned %v\n", err) - } - if ac != nil { - t.Errorf("Invalid auth configuration returned, should be nil %v\n", ac) - } -} - -func TestAuthMixedWithKeyChain(t *testing.T) { - t.Parallel() - auth := base64.StdEncoding.EncodeToString([]byte("user:pass")) - read := strings.NewReader(fmt.Sprintf(`{"auths":{"docker.io":{},"localhost:5000":{"auth":"%s"}},"credsStore":"osxkeychain"}`, auth)) - ac, err := NewAuthConfigurations(read) - if err != nil { - t.Fatal(err) - } - c, ok := ac.Configs["localhost:5000"] - if !ok { - t.Error("NewAuthConfigurations: Expected Configs to contain localhost:5000") - } - if got, want := c.Username, "user"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Username: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.Password, "pass"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Password: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.ServerAddress, "localhost:5000"; got != want { - t.Errorf(`AuthConfigurations.Configs["localhost:5000"].ServerAddress: wrong result. Want %q. 
Got %q`, want, got) - } -} - -func TestAuthAndOtherFields(t *testing.T) { - t.Parallel() - auth := base64.StdEncoding.EncodeToString([]byte("user:pass")) - read := strings.NewReader(fmt.Sprintf(`{ - "auths":{"docker.io":{"auth":"%s","email":"user@example.com"}}, - "detachKeys": "ctrl-e,e", - "HttpHeaders": { "MyHeader": "MyValue" }}`, auth)) - - ac, err := NewAuthConfigurations(read) - if err != nil { - t.Error(err) - } - c, ok := ac.Configs["docker.io"] - if !ok { - t.Error("NewAuthConfigurations: Expected Configs to contain docker.io") - } - if got, want := c.Email, "user@example.com"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Email: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.Username, "user"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Username: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.Password, "pass"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Password: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.ServerAddress, "docker.io"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].ServerAddress: wrong result. Want %q. Got %q`, want, got) - } -} -func TestAuthConfig(t *testing.T) { - t.Parallel() - auth := base64.StdEncoding.EncodeToString([]byte("user:pass")) - read := strings.NewReader(fmt.Sprintf(`{"auths":{"docker.io":{"auth":"%s","email":"user@example.com"}}}`, auth)) - ac, err := NewAuthConfigurations(read) - if err != nil { - t.Error(err) - } - c, ok := ac.Configs["docker.io"] - if !ok { - t.Error("NewAuthConfigurations: Expected Configs to contain docker.io") - } - if got, want := c.Email, "user@example.com"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Email: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.Username, "user"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Username: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.Password, "pass"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Password: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.ServerAddress, "docker.io"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].ServerAddress: wrong result. Want %q. Got %q`, want, got) - } -} - -func TestAuthCheck(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{status: http.StatusOK} - client := newTestClient(fakeRT) - if _, err := client.AuthCheck(nil); err == nil { - t.Fatalf("expected error on nil auth config") - } - // test good auth - if _, err := client.AuthCheck(&AuthConfiguration{}); err != nil { - t.Fatal(err) - } - *fakeRT = FakeRoundTripper{status: http.StatusUnauthorized} - if _, err := client.AuthCheck(&AuthConfiguration{}); err == nil { - t.Fatal("expected failure from unauthorized auth") - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/build_test.go b/vendor/github.com/fsouza/go-dockerclient/build_test.go deleted file mode 100644 index 174cca4a..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/build_test.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package docker - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "reflect" - "testing" - - "github.com/docker/docker/pkg/archive" -) - -func TestBuildImageMultipleContextsError(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - NoCache: true, - CacheFrom: []string{"a", "b", "c"}, - SuppressOutput: true, - RmTmpContainer: true, - ForceRmTmpContainer: true, - InputStream: &buf, - OutputStream: &buf, - ContextDir: "testing/data", - } - err := client.BuildImage(opts) - if err != ErrMultipleContexts { - t.Errorf("BuildImage: providing both InputStream and ContextDir should produce an error") - } -} - -func TestBuildImageContextDirDockerignoreParsing(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - - if err := os.Symlink("doesnotexist", "testing/data/symlink"); err != nil { - t.Errorf("error creating symlink on demand: %s", err) - } - defer func() { - if err := os.Remove("testing/data/symlink"); err != nil { - t.Errorf("error removing symlink on demand: %s", err) - } - }() - workingdir, err := os.Getwd() - - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - NoCache: true, - CacheFrom: []string{"a", "b", "c"}, - SuppressOutput: true, - RmTmpContainer: true, - ForceRmTmpContainer: true, - OutputStream: &buf, - ContextDir: filepath.Join(workingdir, "testing", "data"), - } - err = client.BuildImage(opts) - if err != nil { - t.Fatal(err) - } - reqBody := fakeRT.requests[0].Body - tmpdir, err := unpackBodyTarball(reqBody) - if err != nil { - t.Fatal(err) - } - - defer func() { - if err = os.RemoveAll(tmpdir); err != nil { - t.Fatal(err) - } - }() - - files, err := ioutil.ReadDir(tmpdir) - if err != nil { - t.Fatal(err) - } - - foundFiles := []string{} - for _, file := range files { - foundFiles = append(foundFiles, file.Name()) - } - - expectedFiles := []string{ - ".dockerignore", - "Dockerfile", - "barfile", - "ca.pem", - "cert.pem", - "key.pem", - "server.pem", - "serverkey.pem", - "symlink", - } - - if !reflect.DeepEqual(expectedFiles, foundFiles) { - t.Errorf( - "BuildImage: incorrect files sent in tarball to docker server\nexpected %+v, found %+v", - expectedFiles, foundFiles, - ) - } -} - -func TestBuildImageSendXRegistryConfig(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - NoCache: true, - SuppressOutput: true, - RmTmpContainer: true, - ForceRmTmpContainer: true, - OutputStream: &buf, - ContextDir: "testing/data", - AuthConfigs: AuthConfigurations{ - Configs: map[string]AuthConfiguration{ - "quay.io": { - Username: "foo", - Password: "bar", - Email: "baz", - ServerAddress: "quay.io", - }, - }, - }, - } - - encodedConfig := "eyJjb25maWdzIjp7InF1YXkuaW8iOnsidXNlcm5hbWUiOiJmb28iLCJwYXNzd29yZCI6ImJhciIsImVtYWlsIjoiYmF6Iiwic2VydmVyYWRkcmVzcyI6InF1YXkuaW8ifX19Cg==" - - if err := client.BuildImage(opts); err != nil { - t.Fatal(err) - } - - xRegistryConfig := fakeRT.requests[0].Header["X-Registry-Config"][0] - if xRegistryConfig != encodedConfig { - t.Errorf( - "BuildImage: X-Registry-Config not set currectly: expected %q, got %q", - encodedConfig, - xRegistryConfig, - ) - } -} - -func unpackBodyTarball(req io.ReadCloser) (tmpdir string, err error) { - 
tmpdir, err = ioutil.TempDir("", "go-dockerclient-test") - if err != nil { - return - } - err = archive.Untar(req, tmpdir, &archive.TarOptions{ - Compression: archive.Uncompressed, - NoLchown: true, - }) - return -} diff --git a/vendor/github.com/fsouza/go-dockerclient/change_test.go b/vendor/github.com/fsouza/go-dockerclient/change_test.go deleted file mode 100644 index 203d332f..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/change_test.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import "testing" - -func TestChangeString(t *testing.T) { - t.Parallel() - var tests = []struct { - change Change - expected string - }{ - {Change{"/etc/passwd", ChangeModify}, "C /etc/passwd"}, - {Change{"/etc/passwd", ChangeAdd}, "A /etc/passwd"}, - {Change{"/etc/passwd", ChangeDelete}, "D /etc/passwd"}, - {Change{"/etc/passwd", 33}, " /etc/passwd"}, - } - for _, tt := range tests { - if got := tt.change.String(); got != tt.expected { - t.Errorf("Change.String(): want %q. Got %q.", tt.expected, got) - } - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/client.go b/vendor/github.com/fsouza/go-dockerclient/client.go index ef36559d..581e3141 100644 --- a/vendor/github.com/fsouza/go-dockerclient/client.go +++ b/vendor/github.com/fsouza/go-dockerclient/client.go @@ -33,8 +33,8 @@ import ( "github.com/docker/docker/opts" "github.com/docker/docker/pkg/homedir" - "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/stdcopy" + "github.com/fsouza/go-dockerclient/internal/jsonmessage" ) const ( diff --git a/vendor/github.com/fsouza/go-dockerclient/client_stress_test.go b/vendor/github.com/fsouza/go-dockerclient/client_stress_test.go deleted file mode 100644 index ae0e4e57..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/client_stress_test.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2017 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !windows - -package docker - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "reflect" - "sort" - "sync" - "testing" - "time" -) - -func TestClientDoConcurrentStress(t *testing.T) { - t.Parallel() - var reqs []*http.Request - var mu sync.Mutex - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - mu.Lock() - reqs = append(reqs, r) - mu.Unlock() - }) - var nativeSrvs []*httptest.Server - for i := 0; i < 3; i++ { - srv, cleanup, err := newNativeServer(handler) - if err != nil { - t.Fatal(err) - } - defer cleanup() - nativeSrvs = append(nativeSrvs, srv) - } - var tests = []struct { - testCase string - srv *httptest.Server - scheme string - withTimeout bool - withTLSServer bool - withTLSClient bool - }{ - {testCase: "http server", srv: httptest.NewUnstartedServer(handler), scheme: "http"}, - {testCase: "native server", srv: nativeSrvs[0], scheme: nativeProtocol}, - {testCase: "http with timeout", srv: httptest.NewUnstartedServer(handler), scheme: "http", withTimeout: true}, - {testCase: "native with timeout", srv: nativeSrvs[1], scheme: nativeProtocol, withTimeout: true}, - {testCase: "http with tls", srv: httptest.NewUnstartedServer(handler), scheme: "https", withTLSServer: true, withTLSClient: true}, - {testCase: "native with client-only tls", srv: nativeSrvs[2], scheme: nativeProtocol, withTLSServer: false, withTLSClient: nativeProtocol == unixProtocol}, // TLS client only works with unix protocol - } - for _, tt := range tests { - t.Run(tt.testCase, func(t *testing.T) { - reqs = nil - var client *Client - var err error - endpoint := tt.scheme + "://" + tt.srv.Listener.Addr().String() - if tt.withTLSServer { - tt.srv.StartTLS() - } else { - tt.srv.Start() - } - defer tt.srv.Close() - if tt.withTLSClient { - certPEMBlock, certErr := ioutil.ReadFile("testing/data/cert.pem") - if certErr != nil { - t.Fatal(certErr) - } - keyPEMBlock, certErr := ioutil.ReadFile("testing/data/key.pem") - if certErr != nil { - t.Fatal(certErr) - } - client, err = NewTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, nil) - } else { - client, err = NewClient(endpoint) - } - if err != nil { - t.Fatal(err) - } - if tt.withTimeout { - client.SetTimeout(time.Minute) - } - n := 50 - wg := sync.WaitGroup{} - var paths []string - errsCh := make(chan error, 3*n) - waiters := make(chan CloseWaiter, n) - for i := 0; i < n; i++ { - path := fmt.Sprintf("/%05d", i) - paths = append(paths, "GET"+path) - paths = append(paths, "POST"+path) - paths = append(paths, "HEAD"+path) - wg.Add(1) - go func() { - defer wg.Done() - _, clientErr := client.do("GET", path, doOptions{}) - if clientErr != nil { - errsCh <- clientErr - } - clientErr = client.stream("POST", path, streamOptions{}) - if clientErr != nil { - errsCh <- clientErr - } - cw, clientErr := client.hijack("HEAD", path, hijackOptions{}) - if clientErr != nil { - errsCh <- clientErr - } else { - waiters <- cw - } - }() - } - wg.Wait() - close(errsCh) - close(waiters) - for cw := range waiters { - cw.Wait() - cw.Close() - } - for err = range errsCh { - t.Error(err) - } - var reqPaths []string - for _, r := range reqs { - reqPaths = append(reqPaths, r.Method+r.URL.Path) - } - sort.Strings(paths) - sort.Strings(reqPaths) - if !reflect.DeepEqual(reqPaths, paths) { - t.Fatalf("expected server request paths to equal %v, got: %v", paths, reqPaths) - } - }) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/client_test.go b/vendor/github.com/fsouza/go-dockerclient/client_test.go deleted file mode 100644 index 
a12ab82b..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/client_test.go +++ /dev/null @@ -1,845 +0,0 @@ -// Copyright 2013 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "os" - "path/filepath" - "reflect" - "strconv" - "strings" - "testing" - "time" -) - -func TestNewAPIClient(t *testing.T) { - t.Parallel() - endpoint := "http://localhost:4243" - client, err := NewClient(endpoint) - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - // test native endpoints - endpoint = nativeRealEndpoint - client, err = NewClient(endpoint) - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - if !client.SkipServerVersionCheck { - t.Error("Expected SkipServerVersionCheck to be true, got false") - } - if client.requestedAPIVersion != nil { - t.Errorf("Expected requestedAPIVersion to be nil, got %#v.", client.requestedAPIVersion) - } -} - -func newTLSClient(endpoint string) (*Client, error) { - return NewTLSClient(endpoint, - "testing/data/cert.pem", - "testing/data/key.pem", - "testing/data/ca.pem") -} - -func TestNewTSLAPIClient(t *testing.T) { - t.Parallel() - endpoint := "https://localhost:4243" - client, err := newTLSClient(endpoint) - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - if !client.SkipServerVersionCheck { - t.Error("Expected SkipServerVersionCheck to be true, got false") - } - if client.requestedAPIVersion != nil { - t.Errorf("Expected requestedAPIVersion to be nil, got %#v.", client.requestedAPIVersion) - } -} - -func TestNewVersionedClient(t *testing.T) { - t.Parallel() - endpoint := "http://localhost:4243" - client, err := NewVersionedClient(endpoint, "1.12") - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.12" { - t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.12", reqVersion) - } - if client.SkipServerVersionCheck { - t.Error("Expected SkipServerVersionCheck to be false, got true") - } -} - -func TestNewVersionedClientFromEnv(t *testing.T) { - t.Parallel() - endpoint := "tcp://localhost:2376" - endpointURL := "http://localhost:2376" - os.Setenv("DOCKER_HOST", endpoint) - os.Setenv("DOCKER_TLS_VERIFY", "") - client, err := NewVersionedClientFromEnv("1.12") - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - if client.endpointURL.String() != endpointURL { - t.Errorf("Expected endpointURL %s. Got %s.", endpoint, client.endpoint) - } - if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.12" { - t.Errorf("Wrong requestAPIVersion. Want %q. 
Got %q.", "1.12", reqVersion) - } - if client.SkipServerVersionCheck { - t.Error("Expected SkipServerVersionCheck to be false, got true") - } -} - -func TestNewVersionedClientFromEnvTLS(t *testing.T) { - t.Parallel() - endpoint := "tcp://localhost:2376" - endpointURL := "https://localhost:2376" - base, _ := os.Getwd() - os.Setenv("DOCKER_CERT_PATH", filepath.Join(base, "/testing/data/")) - os.Setenv("DOCKER_HOST", endpoint) - os.Setenv("DOCKER_TLS_VERIFY", "1") - client, err := NewVersionedClientFromEnv("1.12") - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - if client.endpointURL.String() != endpointURL { - t.Errorf("Expected endpointURL %s. Got %s.", endpoint, client.endpoint) - } - if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.12" { - t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.12", reqVersion) - } - if client.SkipServerVersionCheck { - t.Error("Expected SkipServerVersionCheck to be false, got true") - } -} - -func TestNewTLSVersionedClient(t *testing.T) { - t.Parallel() - certPath := "testing/data/cert.pem" - keyPath := "testing/data/key.pem" - caPath := "testing/data/ca.pem" - endpoint := "https://localhost:4243" - client, err := NewVersionedTLSClient(endpoint, certPath, keyPath, caPath, "1.14") - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.14" { - t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.14", reqVersion) - } - if client.SkipServerVersionCheck { - t.Error("Expected SkipServerVersionCheck to be false, got true") - } -} - -func TestNewTLSVersionedClientNoClientCert(t *testing.T) { - t.Parallel() - certPath := "testing/data/cert_doesnotexist.pem" - keyPath := "testing/data/key_doesnotexist.pem" - caPath := "testing/data/ca.pem" - endpoint := "https://localhost:4243" - client, err := NewVersionedTLSClient(endpoint, certPath, keyPath, caPath, "1.14") - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.14" { - t.Errorf("Wrong requestAPIVersion. Want %q. 
Got %q.", "1.14", reqVersion) - } - if client.SkipServerVersionCheck { - t.Error("Expected SkipServerVersionCheck to be false, got true") - } -} - -func TestNewTLSVersionedClientInvalidCA(t *testing.T) { - t.Parallel() - certPath := "testing/data/cert.pem" - keyPath := "testing/data/key.pem" - caPath := "testing/data/key.pem" - endpoint := "https://localhost:4243" - _, err := NewVersionedTLSClient(endpoint, certPath, keyPath, caPath, "1.14") - if err == nil { - t.Errorf("Expected invalid ca at %s", caPath) - } -} - -func TestNewTLSVersionedClientInvalidCANoClientCert(t *testing.T) { - t.Parallel() - certPath := "testing/data/cert_doesnotexist.pem" - keyPath := "testing/data/key_doesnotexist.pem" - caPath := "testing/data/key.pem" - endpoint := "https://localhost:4243" - _, err := NewVersionedTLSClient(endpoint, certPath, keyPath, caPath, "1.14") - if err == nil { - t.Errorf("Expected invalid ca at %s", caPath) - } -} - -func TestNewClientInvalidEndpoint(t *testing.T) { - t.Parallel() - cases := []string{ - "htp://localhost:3243", "http://localhost:a", - "", "http://localhost:8080:8383", "http://localhost:65536", - "https://localhost:-20", - } - for _, c := range cases { - client, err := NewClient(c) - if client != nil { - t.Errorf("Want client for invalid endpoint, got %#v.", client) - } - if !reflect.DeepEqual(err, ErrInvalidEndpoint) { - t.Errorf("NewClient(%q): Got invalid error for invalid endpoint. Want %#v. Got %#v.", c, ErrInvalidEndpoint, err) - } - } -} - -func TestNewClientNoSchemeEndpoint(t *testing.T) { - t.Parallel() - cases := []string{"localhost", "localhost:8080"} - for _, c := range cases { - client, err := NewClient(c) - if client == nil { - t.Errorf("Want client for scheme-less endpoint, got ") - } - if err != nil { - t.Errorf("Got unexpected error scheme-less endpoint: %q", err) - } - } -} - -func TestNewTLSClient(t *testing.T) { - t.Parallel() - var tests = []struct { - endpoint string - expected string - }{ - {"tcp://localhost:2376", "https"}, - {"tcp://localhost:2375", "https"}, - {"tcp://localhost:4000", "https"}, - {"http://localhost:4000", "https"}, - } - for _, tt := range tests { - client, err := newTLSClient(tt.endpoint) - if err != nil { - t.Error(err) - } - got := client.endpointURL.Scheme - if got != tt.expected { - t.Errorf("endpointURL.Scheme: Got %s. Want %s.", got, tt.expected) - } - } -} - -func TestEndpoint(t *testing.T) { - t.Parallel() - client, err := NewVersionedClient("http://localhost:4243", "1.12") - if err != nil { - t.Fatal(err) - } - if endpoint := client.Endpoint(); endpoint != client.endpoint { - t.Errorf("Client.Endpoint(): want %q. Got %q", client.endpoint, endpoint) - } -} - -func TestGetURL(t *testing.T) { - t.Parallel() - var tests = []struct { - endpoint string - path string - expected string - }{ - {"http://localhost:4243/", "/", "http://localhost:4243/"}, - {"http://localhost:4243", "/", "http://localhost:4243/"}, - {"http://localhost:4243", "/containers/ps", "http://localhost:4243/containers/ps"}, - {"tcp://localhost:4243", "/containers/ps", "http://localhost:4243/containers/ps"}, - {"http://localhost:4243/////", "/", "http://localhost:4243/"}, - {nativeRealEndpoint, "/containers", "/containers"}, - } - for _, tt := range tests { - client, _ := NewClient(tt.endpoint) - client.endpoint = tt.endpoint - client.SkipServerVersionCheck = true - got := client.getURL(tt.path) - if got != tt.expected { - t.Errorf("getURL(%q): Got %s. 
Want %s.", tt.path, got, tt.expected) - } - } -} - -func TestGetFakeNativeURL(t *testing.T) { - t.Parallel() - var tests = []struct { - endpoint string - path string - expected string - }{ - {nativeRealEndpoint, "/", "http://unix.sock/"}, - {nativeRealEndpoint, "/", "http://unix.sock/"}, - {nativeRealEndpoint, "/containers/ps", "http://unix.sock/containers/ps"}, - } - for _, tt := range tests { - client, _ := NewClient(tt.endpoint) - client.endpoint = tt.endpoint - client.SkipServerVersionCheck = true - got := client.getFakeNativeURL(tt.path) - if got != tt.expected { - t.Errorf("getURL(%q): Got %s. Want %s.", tt.path, got, tt.expected) - } - } -} - -func TestError(t *testing.T) { - t.Parallel() - fakeBody := ioutil.NopCloser(bytes.NewBufferString("bad parameter")) - resp := &http.Response{ - StatusCode: 400, - Body: fakeBody, - } - err := newError(resp) - expected := Error{Status: 400, Message: "bad parameter"} - if !reflect.DeepEqual(expected, *err) { - t.Errorf("Wrong error type. Want %#v. Got %#v.", expected, *err) - } - message := "API error (400): bad parameter" - if err.Error() != message { - t.Errorf("Wrong error message. Want %q. Got %q.", message, err.Error()) - } -} - -func TestQueryString(t *testing.T) { - t.Parallel() - v := float32(2.4) - f32QueryString := fmt.Sprintf("w=%s&x=10&y=10.35", strconv.FormatFloat(float64(v), 'f', -1, 64)) - jsonPerson := url.QueryEscape(`{"Name":"gopher","age":4}`) - var tests = []struct { - input interface{} - want string - }{ - {&ListContainersOptions{All: true}, "all=1"}, - {ListContainersOptions{All: true}, "all=1"}, - {ListContainersOptions{Before: "something"}, "before=something"}, - {ListContainersOptions{Before: "something", Since: "other"}, "before=something&since=other"}, - {ListContainersOptions{Filters: map[string][]string{"status": {"paused", "running"}}}, "filters=%7B%22status%22%3A%5B%22paused%22%2C%22running%22%5D%7D"}, - {dumb{X: 10, Y: 10.35000}, "x=10&y=10.35"}, - {dumb{W: v, X: 10, Y: 10.35000}, f32QueryString}, - {dumb{X: 10, Y: 10.35000, Z: 10}, "x=10&y=10.35&zee=10"}, - {dumb{v: 4, X: 10, Y: 10.35000}, "x=10&y=10.35"}, - {dumb{T: 10, Y: 10.35000}, "y=10.35"}, - {dumb{Person: &person{Name: "gopher", Age: 4}}, "p=" + jsonPerson}, - {nil, ""}, - {10, ""}, - {"not_a_struct", ""}, - } - for _, tt := range tests { - got := queryString(tt.input) - if got != tt.want { - t.Errorf("queryString(%v). Want %q. 
Got %q.", tt.input, tt.want, got) - } - } -} - -func TestAPIVersions(t *testing.T) { - t.Parallel() - var tests = []struct { - a string - b string - expectedALessThanB bool - expectedALessThanOrEqualToB bool - expectedAGreaterThanB bool - expectedAGreaterThanOrEqualToB bool - }{ - {"1.11", "1.11", false, true, false, true}, - {"1.10", "1.11", true, true, false, false}, - {"1.11", "1.10", false, false, true, true}, - - {"1.11-ubuntu0", "1.11", false, true, false, true}, - {"1.10", "1.11-el7", true, true, false, false}, - - {"1.9", "1.11", true, true, false, false}, - {"1.11", "1.9", false, false, true, true}, - - {"1.1.1", "1.1", false, false, true, true}, - {"1.1", "1.1.1", true, true, false, false}, - - {"2.1", "1.1.1", false, false, true, true}, - {"2.1", "1.3.1", false, false, true, true}, - {"1.1.1", "2.1", true, true, false, false}, - {"1.3.1", "2.1", true, true, false, false}, - } - - for _, tt := range tests { - a, _ := NewAPIVersion(tt.a) - b, _ := NewAPIVersion(tt.b) - - if tt.expectedALessThanB && !a.LessThan(b) { - t.Errorf("Expected %#v < %#v", a, b) - } - if tt.expectedALessThanOrEqualToB && !a.LessThanOrEqualTo(b) { - t.Errorf("Expected %#v <= %#v", a, b) - } - if tt.expectedAGreaterThanB && !a.GreaterThan(b) { - t.Errorf("Expected %#v > %#v", a, b) - } - if tt.expectedAGreaterThanOrEqualToB && !a.GreaterThanOrEqualTo(b) { - t.Errorf("Expected %#v >= %#v", a, b) - } - } -} - -func TestPing(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - err := client.Ping() - if err != nil { - t.Fatal(err) - } -} - -func TestPingFailing(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusInternalServerError} - client := newTestClient(fakeRT) - err := client.Ping() - if err == nil { - t.Fatal("Expected non nil error, got nil") - } - expectedErrMsg := "API error (500): " - if err.Error() != expectedErrMsg { - t.Fatalf("Expected error to be %q, got: %q", expectedErrMsg, err.Error()) - } -} - -func TestPingFailingWrongStatus(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusAccepted} - client := newTestClient(fakeRT) - err := client.Ping() - if err == nil { - t.Fatal("Expected non nil error, got nil") - } - expectedErrMsg := "API error (202): " - if err.Error() != expectedErrMsg { - t.Fatalf("Expected error to be %q, got: %q", expectedErrMsg, err.Error()) - } -} - -func TestPingErrorWithNativeClient(t *testing.T) { - t.Parallel() - srv, cleanup, err := newNativeServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("aaaaaaaaaaa-invalid-aaaaaaaaaaa")) - })) - if err != nil { - t.Fatal(err) - } - defer cleanup() - srv.Start() - defer srv.Close() - endpoint := nativeBadEndpoint - client, err := NewClient(endpoint) - if err != nil { - t.Fatal(err) - } - err = client.Ping() - if err == nil { - t.Fatal("Expected non nil error, got nil") - } -} - -func TestClientStreamTimeoutNotHit(t *testing.T) { - t.Parallel() - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - for i := 0; i < 5; i++ { - fmt.Fprintf(w, "%d\n", i) - if f, ok := w.(http.Flusher); ok { - f.Flush() - } - time.Sleep(100 * time.Millisecond) - } - })) - client, err := NewClient(srv.URL) - if err != nil { - t.Fatal(err) - } - var w bytes.Buffer - err = client.stream("POST", "/image/create", streamOptions{ - setRawTerminal: true, - stdout: &w, - inactivityTimeout: 300 * time.Millisecond, - }) - if err != 
nil { - t.Fatal(err) - } - expected := "0\n1\n2\n3\n4\n" - result := w.String() - if result != expected { - t.Fatalf("expected stream result %q, got: %q", expected, result) - } -} - -func TestClientStreamInactivityTimeout(t *testing.T) { - t.Parallel() - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - for i := 0; i < 5; i++ { - fmt.Fprintf(w, "%d\n", i) - if f, ok := w.(http.Flusher); ok { - f.Flush() - } - time.Sleep(500 * time.Millisecond) - } - })) - client, err := NewClient(srv.URL) - if err != nil { - t.Fatal(err) - } - var w bytes.Buffer - err = client.stream("POST", "/image/create", streamOptions{ - setRawTerminal: true, - stdout: &w, - inactivityTimeout: 100 * time.Millisecond, - }) - if err != ErrInactivityTimeout { - t.Fatalf("expected request canceled error, got: %s", err) - } - expected := "0\n" - result := w.String() - if result != expected { - t.Fatalf("expected stream result %q, got: %q", expected, result) - } -} - -func TestClientStreamContextDeadline(t *testing.T) { - t.Parallel() - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, "abc\n") - if f, ok := w.(http.Flusher); ok { - f.Flush() - } - time.Sleep(time.Second) - fmt.Fprint(w, "def\n") - if f, ok := w.(http.Flusher); ok { - f.Flush() - } - })) - client, err := NewClient(srv.URL) - if err != nil { - t.Fatal(err) - } - var w bytes.Buffer - ctx, cancel := context.WithTimeout(context.Background(), 400*time.Millisecond) - defer cancel() - err = client.stream("POST", "/image/create", streamOptions{ - setRawTerminal: true, - stdout: &w, - context: ctx, - }) - if err != context.DeadlineExceeded { - t.Fatalf("expected %s, got: %s", context.DeadlineExceeded, err) - } - expected := "abc\n" - result := w.String() - if result != expected { - t.Fatalf("expected stream result %q, got: %q", expected, result) - } -} - -func TestClientStreamContextCancel(t *testing.T) { - t.Parallel() - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, "abc\n") - if f, ok := w.(http.Flusher); ok { - f.Flush() - } - time.Sleep(500 * time.Millisecond) - fmt.Fprint(w, "def\n") - if f, ok := w.(http.Flusher); ok { - f.Flush() - } - })) - client, err := NewClient(srv.URL) - if err != nil { - t.Fatal(err) - } - var w bytes.Buffer - ctx, cancel := context.WithCancel(context.Background()) - go func() { - time.Sleep(200 * time.Millisecond) - cancel() - }() - err = client.stream("POST", "/image/create", streamOptions{ - setRawTerminal: true, - stdout: &w, - context: ctx, - }) - if err != context.Canceled { - t.Fatalf("expected %s, got: %s", context.Canceled, err) - } - expected := "abc\n" - result := w.String() - if result != expected { - t.Fatalf("expected stream result %q, got: %q", expected, result) - } -} - -var mockPullOutput = `{"status":"Pulling from tsuru/static","id":"latest"} -{"status":"Already exists","progressDetail":{},"id":"a6aa3b66376f"} -{"status":"Pulling fs layer","progressDetail":{},"id":"106572778bf7"} -{"status":"Pulling fs layer","progressDetail":{},"id":"bac681833e51"} -{"status":"Pulling fs layer","progressDetail":{},"id":"7302e23ef08a"} -{"status":"Downloading","progressDetail":{"current":621,"total":621},"progress":"[==================================================\u003e] 621 B/621 B","id":"bac681833e51"} -{"status":"Verifying Checksum","progressDetail":{},"id":"bac681833e51"} -{"status":"Download complete","progressDetail":{},"id":"bac681833e51"} 
-{"status":"Downloading","progressDetail":{"current":1854,"total":1854},"progress":"[==================================================\u003e] 1.854 kB/1.854 kB","id":"106572778bf7"} -{"status":"Verifying Checksum","progressDetail":{},"id":"106572778bf7"} -{"status":"Download complete","progressDetail":{},"id":"106572778bf7"} -{"status":"Extracting","progressDetail":{"current":1854,"total":1854},"progress":"[==================================================\u003e] 1.854 kB/1.854 kB","id":"106572778bf7"} -{"status":"Extracting","progressDetail":{"current":1854,"total":1854},"progress":"[==================================================\u003e] 1.854 kB/1.854 kB","id":"106572778bf7"} -{"status":"Downloading","progressDetail":{"current":233019,"total":21059403},"progress":"[\u003e ] 233 kB/21.06 MB","id":"7302e23ef08a"} -{"status":"Downloading","progressDetail":{"current":462395,"total":21059403},"progress":"[=\u003e ] 462.4 kB/21.06 MB","id":"7302e23ef08a"} -{"status":"Downloading","progressDetail":{"current":8490555,"total":21059403},"progress":"[====================\u003e ] 8.491 MB/21.06 MB","id":"7302e23ef08a"} -{"status":"Downloading","progressDetail":{"current":20876859,"total":21059403},"progress":"[=================================================\u003e ] 20.88 MB/21.06 MB","id":"7302e23ef08a"} -{"status":"Verifying Checksum","progressDetail":{},"id":"7302e23ef08a"} -{"status":"Download complete","progressDetail":{},"id":"7302e23ef08a"} -{"status":"Pull complete","progressDetail":{},"id":"106572778bf7"} -{"status":"Extracting","progressDetail":{"current":621,"total":621},"progress":"[==================================================\u003e] 621 B/621 B","id":"bac681833e51"} -{"status":"Extracting","progressDetail":{"current":621,"total":621},"progress":"[==================================================\u003e] 621 B/621 B","id":"bac681833e51"} -{"status":"Pull complete","progressDetail":{},"id":"bac681833e51"} -{"status":"Extracting","progressDetail":{"current":229376,"total":21059403},"progress":"[\u003e ] 229.4 kB/21.06 MB","id":"7302e23ef08a"} -{"status":"Extracting","progressDetail":{"current":458752,"total":21059403},"progress":"[=\u003e ] 458.8 kB/21.06 MB","id":"7302e23ef08a"} -{"status":"Extracting","progressDetail":{"current":11239424,"total":21059403},"progress":"[==========================\u003e ] 11.24 MB/21.06 MB","id":"7302e23ef08a"} -{"status":"Extracting","progressDetail":{"current":21059403,"total":21059403},"progress":"[==================================================\u003e] 21.06 MB/21.06 MB","id":"7302e23ef08a"} -{"status":"Pull complete","progressDetail":{},"id":"7302e23ef08a"} -{"status":"Digest: sha256:b754472891aa7e33fc0214e3efa988174f2c2289285fcae868b7ec8b6675fc77"} -{"status":"Status: Downloaded newer image for 192.168.50.4:5000/tsuru/static"} -` - -func TestClientStreamJSONDecode(t *testing.T) { - t.Parallel() - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(mockPullOutput)) - })) - client, err := NewClient(srv.URL) - if err != nil { - t.Fatal(err) - } - var w bytes.Buffer - err = client.stream("POST", "/image/create", streamOptions{ - stdout: &w, - useJSONDecoder: true, - }) - if err != nil { - t.Fatal(err) - } - expected := `latest: Pulling from tsuru/static -a6aa3b66376f: Already exists -106572778bf7: Pulling fs layer -bac681833e51: Pulling fs layer -7302e23ef08a: Pulling fs layer -bac681833e51: Verifying Checksum -bac681833e51: Download complete -106572778bf7: Verifying Checksum 
-106572778bf7: Download complete -7302e23ef08a: Verifying Checksum -7302e23ef08a: Download complete -106572778bf7: Pull complete -bac681833e51: Pull complete -7302e23ef08a: Pull complete -Digest: sha256:b754472891aa7e33fc0214e3efa988174f2c2289285fcae868b7ec8b6675fc77 -Status: Downloaded newer image for 192.168.50.4:5000/tsuru/static -` - result := w.String() - if result != expected { - t.Fatalf("expected stream result %q, got: %q", expected, result) - } -} - -type terminalBuffer struct { - bytes.Buffer -} - -func (b *terminalBuffer) FD() uintptr { - return os.Stdout.Fd() -} - -func (b *terminalBuffer) IsTerminal() bool { - return true -} - -func TestClientStreamJSONDecodeWithTerminal(t *testing.T) { - t.Parallel() - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(mockPullOutput)) - })) - client, err := NewClient(srv.URL) - if err != nil { - t.Fatal(err) - } - var w terminalBuffer - err = client.stream("POST", "/image/create", streamOptions{ - stdout: &w, - useJSONDecoder: true, - }) - if err != nil { - t.Fatal(err) - } - expected := "latest: Pulling from tsuru/static\n\n" + - "\x1b[1A\x1b[1K\x1b[K\ra6aa3b66376f: Already exists \r\x1b[1B\n" + - "\x1b[1A\x1b[1K\x1b[K\r106572778bf7: Pulling fs layer \r\x1b[1B\n" + - "\x1b[1A\x1b[1K\x1b[K\rbac681833e51: Pulling fs layer \r\x1b[1B\n" + - "\x1b[1A\x1b[1K\x1b[K\r7302e23ef08a: Pulling fs layer \r\x1b[1B\x1b[2A\x1b[1K\x1b[K\rbac681833e51: Downloading [==================================================>] 621B/621B\r\x1b[2B\x1b[2A\x1b[1K\x1b[K\rbac681833e51: Verifying Checksum \r\x1b[2B\x1b[2A\x1b[1K\x1b[K\rbac681833e51: Download complete \r\x1b[2B\x1b[3A\x1b[1K\x1b[K\r106572778bf7: Downloading [==================================================>] 1.854kB/1.854kB\r\x1b[3B\x1b[3A\x1b[1K\x1b[K\r106572778bf7: Verifying Checksum \r\x1b[3B\x1b[3A\x1b[1K\x1b[K\r106572778bf7: Download complete \r\x1b[3B\x1b[3A\x1b[1K\x1b[K\r106572778bf7: Extracting [==================================================>] 1.854kB/1.854kB\r\x1b[3B\x1b[3A\x1b[1K\x1b[K\r106572778bf7: Extracting [==================================================>] 1.854kB/1.854kB\r\x1b[3B\x1b[1A\x1b[1K\x1b[K\r7302e23ef08a: Downloading [> ] 233kB/21.06MB\r\x1b[1B\x1b[1A\x1b[1K\x1b[K\r7302e23ef08a: Downloading [=> ] 462.4kB/21.06MB\r\x1b[1B\x1b[1A\x1b[1K\x1b[K\r7302e23ef08a: Downloading [====================> ] 8.491MB/21.06MB\r\x1b[1B\x1b[1A\x1b[1K\x1b[K\r7302e23ef08a: Downloading [=================================================> ] 20.88MB/21.06MB\r\x1b[1B\x1b[1A\x1b[1K\x1b[K\r7302e23ef08a: Verifying Checksum \r\x1b[1B\x1b[1A\x1b[1K\x1b[K\r7302e23ef08a: Download complete \r\x1b[1B\x1b[3A\x1b[1K\x1b[K\r106572778bf7: Pull complete \r\x1b[3B\x1b[2A\x1b[1K\x1b[K\rbac681833e51: Extracting [==================================================>] 621B/621B\r\x1b[2B\x1b[2A\x1b[1K\x1b[K\rbac681833e51: Extracting [==================================================>] 621B/621B\r\x1b[2B\x1b[2A\x1b[1K\x1b[K\rbac681833e51: Pull complete \r\x1b[2B\x1b[1A\x1b[1K\x1b[K\r7302e23ef08a: Extracting [> ] 229.4kB/21.06MB\r\x1b[1B\x1b[1A\x1b[1K\x1b[K\r7302e23ef08a: Extracting [=> ] 458.8kB/21.06MB\r\x1b[1B\x1b[1A\x1b[1K\x1b[K\r7302e23ef08a: Extracting [==========================> ] 11.24MB/21.06MB\r\x1b[1B\x1b[1A\x1b[1K\x1b[K\r7302e23ef08a: Extracting [==================================================>] 21.06MB/21.06MB\r\x1b[1B\x1b[1A\x1b[1K\x1b[K\r7302e23ef08a: Pull complete \r\x1b[1BDigest: 
sha256:b754472891aa7e33fc0214e3efa988174f2c2289285fcae868b7ec8b6675fc77\n" + - "Status: Downloaded newer image for 192.168.50.4:5000/tsuru/static\n" - result := w.String() - if result != expected { - t.Fatalf("expected stream result %q, got: %q", expected, result) - } -} - -func TestClientDoContextDeadline(t *testing.T) { - t.Parallel() - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - time.Sleep(500 * time.Millisecond) - })) - client, err := NewClient(srv.URL) - if err != nil { - t.Fatal(err) - } - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - defer cancel() - _, err = client.do("POST", "/image/create", doOptions{ - context: ctx, - }) - if err != context.DeadlineExceeded { - t.Fatalf("expected %s, got: %s", context.DeadlineExceeded, err) - } -} - -func TestClientDoContextCancel(t *testing.T) { - t.Parallel() - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - time.Sleep(500 * time.Millisecond) - })) - client, err := NewClient(srv.URL) - if err != nil { - t.Fatal(err) - } - ctx, cancel := context.WithCancel(context.Background()) - go func() { - time.Sleep(100 * time.Millisecond) - cancel() - }() - _, err = client.do("POST", "/image/create", doOptions{ - context: ctx, - }) - if err != context.Canceled { - t.Fatalf("expected %s, got: %s", context.Canceled, err) - } -} - -func TestClientStreamTimeoutNativeClient(t *testing.T) { - t.Parallel() - srv, cleanup, err := newNativeServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - for i := 0; i < 5; i++ { - fmt.Fprintf(w, "%d\n", i) - if f, ok := w.(http.Flusher); ok { - f.Flush() - } - time.Sleep(500 * time.Millisecond) - } - })) - if err != nil { - t.Fatal(err) - } - defer cleanup() - srv.Start() - defer srv.Close() - client, err := NewClient(nativeProtocol + "://" + srv.Listener.Addr().String()) - if err != nil { - t.Fatal(err) - } - var w bytes.Buffer - err = client.stream("POST", "/image/create", streamOptions{ - setRawTerminal: true, - stdout: &w, - inactivityTimeout: 100 * time.Millisecond, - }) - if err != ErrInactivityTimeout { - t.Fatalf("expected request canceled error, got: %s", err) - } - expected := "0\n" - result := w.String() - if result != expected { - t.Fatalf("expected stream result %q, got: %q", expected, result) - } -} - -type FakeRoundTripper struct { - message string - status int - header map[string]string - requests []*http.Request -} - -func (rt *FakeRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { - body := strings.NewReader(rt.message) - rt.requests = append(rt.requests, r) - res := &http.Response{ - StatusCode: rt.status, - Body: ioutil.NopCloser(body), - Header: make(http.Header), - } - for k, v := range rt.header { - res.Header.Set(k, v) - } - return res, nil -} - -func (rt *FakeRoundTripper) Reset() { - rt.requests = nil -} - -type person struct { - Name string - Age int `json:"age"` -} - -type dumb struct { - T int `qs:"-"` - v int - W float32 - X int - Y float64 - Z int `qs:"zee"` - Person *person `qs:"p"` -} diff --git a/vendor/github.com/fsouza/go-dockerclient/client_unix_test.go b/vendor/github.com/fsouza/go-dockerclient/client_unix_test.go deleted file mode 100644 index e085eb78..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/client_unix_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// +build !windows -// Copyright 2016 go-dockerclient authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "os" - "path/filepath" - "testing" -) - -const ( - nativeProtocol = unixProtocol - nativeRealEndpoint = "unix:///var/run/docker.sock" - nativeBadEndpoint = "unix:///tmp/echo.sock" -) - -func TestNewTSLAPIClientUnixEndpoint(t *testing.T) { - t.Parallel() - srv, cleanup, err := newNativeServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("ok")) - })) - if err != nil { - t.Fatal(err) - } - defer cleanup() - srv.Start() - defer srv.Close() - endpoint := nativeProtocol + "://" + srv.Listener.Addr().String() - client, err := newTLSClient(endpoint) - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - rsp, err := client.do("GET", "/", doOptions{}) - if err != nil { - t.Fatal(err) - } - data, err := ioutil.ReadAll(rsp.Body) - if err != nil { - t.Fatal(err) - } - if string(data) != "ok" { - t.Fatalf("Expected response to be %q, got: %q", "ok", string(data)) - } -} - -func newNativeServer(handler http.Handler) (*httptest.Server, func(), error) { - tmpdir, err := ioutil.TempDir("", "socket") - if err != nil { - return nil, nil, err - } - socketPath := filepath.Join(tmpdir, "docker_test_stress.sock") - l, err := net.Listen("unix", socketPath) - if err != nil { - return nil, nil, err - } - srv := httptest.NewUnstartedServer(handler) - srv.Listener = l - return srv, func() { os.RemoveAll(tmpdir) }, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/client_windows_test.go b/vendor/github.com/fsouza/go-dockerclient/client_windows_test.go deleted file mode 100644 index d2ea62ac..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/client_windows_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build windows - -// Copyright 2016 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package docker - -import ( - "fmt" - "net/http" - "net/http/httptest" - "sync" - - "github.com/Microsoft/go-winio" -) - -const ( - nativeProtocol = namedPipeProtocol - nativeRealEndpoint = "npipe:////./pipe/docker_engine" - nativeBadEndpoint = "npipe:////./pipe/godockerclient_test_echo" -) - -var ( - // namedPipeCount is used to provide uniqueness in the named pipes generated - // by newNativeServer - namedPipeCount int - // namedPipesAllocateLock protects namedPipeCount - namedPipeAllocateLock sync.Mutex -) - -func newNativeServer(handler http.Handler) (*httptest.Server, func(), error) { - namedPipeAllocateLock.Lock() - defer namedPipeAllocateLock.Unlock() - pipeName := fmt.Sprintf("//./pipe/godockerclient_test_%d", namedPipeCount) - namedPipeCount++ - l, err := winio.ListenPipe(pipeName, &winio.PipeConfig{MessageMode: true}) - if err != nil { - return nil, nil, err - } - srv := httptest.NewUnstartedServer(handler) - srv.Listener = l - return srv, func() {}, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/container.go b/vendor/github.com/fsouza/go-dockerclient/container.go index e24c9fb2..b48bc2cf 100644 --- a/vendor/github.com/fsouza/go-dockerclient/container.go +++ b/vendor/github.com/fsouza/go-dockerclient/container.go @@ -788,6 +788,7 @@ type HostConfig struct { IOMaximumIOps int64 `json:"IOMaximumIOps,omitempty" yaml:"IOMaximumIOps,omitempty"` Mounts []HostMount `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"` Init bool `json:",omitempty" yaml:",omitempty"` + Runtime string `json:"Runtime,omitempty" yaml:"Runtime,omitempty" toml:"Runtime,omitempty"` } // NetworkingConfig represents the container's networking configuration for each of its interfaces diff --git a/vendor/github.com/fsouza/go-dockerclient/container_test.go b/vendor/github.com/fsouza/go-dockerclient/container_test.go deleted file mode 100644 index 2dfcada6..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/container_test.go +++ /dev/null @@ -1,2807 +0,0 @@ -// Copyright 2013 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bufio" - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "net/url" - "os" - "reflect" - "strconv" - "strings" - "testing" - "time" -) - -func TestStateString(t *testing.T) { - t.Parallel() - started := time.Now().Add(-3 * time.Hour) - var tests = []struct { - input State - expected string - }{ - {State{Running: true, Paused: true, StartedAt: started}, "Up 3 hours (Paused)"}, - {State{Running: true, Restarting: true, ExitCode: 7, FinishedAt: started}, "Restarting (7) 3 hours ago"}, - {State{Running: true, StartedAt: started}, "Up 3 hours"}, - {State{RemovalInProgress: true}, "Removal In Progress"}, - {State{Dead: true}, "Dead"}, - {State{}, "Created"}, - {State{StartedAt: started}, ""}, - {State{ExitCode: 7, StartedAt: started, FinishedAt: started}, "Exited (7) 3 hours ago"}, - } - for _, tt := range tests { - if got := tt.input.String(); got != tt.expected { - t.Errorf("State.String(): wrong result. Want %q. 
Got %q.", tt.expected, got) - } - } -} - -func TestStateStateString(t *testing.T) { - t.Parallel() - started := time.Now().Add(-3 * time.Hour) - var tests = []struct { - input State - expected string - }{ - {State{Running: true, Paused: true}, "paused"}, - {State{Running: true, Restarting: true}, "restarting"}, - {State{Running: true}, "running"}, - {State{Dead: true}, "dead"}, - {State{}, "created"}, - {State{StartedAt: started}, "exited"}, - } - for _, tt := range tests { - if got := tt.input.StateString(); got != tt.expected { - t.Errorf("State.String(): wrong result. Want %q. Got %q.", tt.expected, got) - } - } -} - -func TestListContainers(t *testing.T) { - t.Parallel() - jsonContainers := `[ - { - "Id": "8dfafdbc3a40", - "Image": "base:latest", - "Command": "echo 1", - "Created": 1367854155, - "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], - "Status": "Exit 0" - }, - { - "Id": "9cd87474be90", - "Image": "base:latest", - "Command": "echo 222222", - "Created": 1367854155, - "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], - "Status": "Exit 0" - }, - { - "Id": "3176a2479c92", - "Image": "base:latest", - "Command": "echo 3333333333333333", - "Created": 1367854154, - "Ports":[{"PrivatePort": 2221, "PublicPort": 3331, "Type": "tcp"}], - "Status": "Exit 0" - }, - { - "Id": "4cb07b47f9fb", - "Image": "base:latest", - "Command": "echo 444444444444444444444444444444444", - "Ports":[{"PrivatePort": 2223, "PublicPort": 3332, "Type": "tcp"}], - "Created": 1367854152, - "Status": "Exit 0" - } -]` - var expected []APIContainers - err := json.Unmarshal([]byte(jsonContainers), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: jsonContainers, status: http.StatusOK}) - containers, err := client.ListContainers(ListContainersOptions{}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(containers, expected) { - t.Errorf("ListContainers: Expected %#v. Got %#v.", expected, containers) - } -} - -func TestListContainersParams(t *testing.T) { - t.Parallel() - var tests = []struct { - input ListContainersOptions - params map[string][]string - }{ - {ListContainersOptions{}, map[string][]string{}}, - {ListContainersOptions{All: true}, map[string][]string{"all": {"1"}}}, - {ListContainersOptions{All: true, Limit: 10}, map[string][]string{"all": {"1"}, "limit": {"10"}}}, - { - ListContainersOptions{All: true, Limit: 10, Since: "adf9983", Before: "abdeef"}, - map[string][]string{"all": {"1"}, "limit": {"10"}, "since": {"adf9983"}, "before": {"abdeef"}}, - }, - { - ListContainersOptions{Filters: map[string][]string{"status": {"paused", "running"}}}, - map[string][]string{"filters": {"{\"status\":[\"paused\",\"running\"]}"}}, - }, - { - ListContainersOptions{All: true, Filters: map[string][]string{"exited": {"0"}, "status": {"exited"}}}, - map[string][]string{"all": {"1"}, "filters": {"{\"exited\":[\"0\"],\"status\":[\"exited\"]}"}}, - }, - } - fakeRT := &FakeRoundTripper{message: "[]", status: http.StatusOK} - client := newTestClient(fakeRT) - u, _ := url.Parse(client.getURL("/containers/json")) - for _, tt := range tests { - if _, err := client.ListContainers(tt.input); err != nil { - t.Error(err) - } - got := map[string][]string(fakeRT.requests[0].URL.Query()) - if !reflect.DeepEqual(got, tt.params) { - t.Errorf("Expected %#v, got %#v.", tt.params, got) - } - if path := fakeRT.requests[0].URL.Path; path != u.Path { - t.Errorf("Wrong path on request. Want %q. 
Got %q.", u.Path, path) - } - if meth := fakeRT.requests[0].Method; meth != "GET" { - t.Errorf("Wrong HTTP method. Want GET. Got %s.", meth) - } - fakeRT.Reset() - } -} - -func TestListContainersFailure(t *testing.T) { - t.Parallel() - var tests = []struct { - status int - message string - }{ - {400, "bad parameter"}, - {500, "internal server error"}, - } - for _, tt := range tests { - client := newTestClient(&FakeRoundTripper{message: tt.message, status: tt.status}) - expected := Error{Status: tt.status, Message: tt.message} - containers, err := client.ListContainers(ListContainersOptions{}) - if !reflect.DeepEqual(expected, *err.(*Error)) { - t.Errorf("Wrong error in ListContainers. Want %#v. Got %#v.", expected, err) - } - if len(containers) > 0 { - t.Errorf("ListContainers failure. Expected empty list. Got %#v.", containers) - } - } -} - -func TestInspectContainer(t *testing.T) { - t.Parallel() - jsonContainer := `{ - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "AppArmorProfile": "Profile", - "Created": "2013-05-07T14:51:42.087658+02:00", - "Path": "date", - "Args": [], - "Config": { - "Hostname": "4fa6e0f0c678", - "User": "", - "Memory": 17179869184, - "MemorySwap": 34359738368, - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "PortSpecs": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Image": "base", - "Volumes": {}, - "VolumesFrom": "", - "SecurityOpt": [ - "label:user:USER" - ], - "Ulimits": [ - { "Name": "nofile", "Soft": 1024, "Hard": 2048 } - ], - "Shell": [ - "/bin/sh", "-c" - ] - }, - "State": { - "Running": false, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-05-07T14:51:42.087658+02:00", - "Ghost": false - }, - "Node": { - "ID": "4I4E:QR4I:Z733:QEZK:5X44:Q4T7:W2DD:JRDY:KB2O:PODO:Z5SR:XRB6", - "IP": "192.168.99.105", - "Addra": "192.168.99.105:2376", - "Name": "node-01", - "Cpus": 4, - "Memory": 1048436736, - "Labels": { - "executiondriver": "native-0.2", - "kernelversion": "3.18.5-tinycore64", - "operatingsystem": "Boot2Docker 1.5.0 (TCL 5.4); master : a66bce5 - Tue Feb 10 23:31:27 UTC 2015", - "provider": "virtualbox", - "storagedriver": "aufs" - } - }, - "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "NetworkSettings": { - "IpAddress": "", - "IpPrefixLen": 0, - "Gateway": "", - "Bridge": "", - "PortMapping": null - }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", - "ResolvConfPath": "/etc/resolv.conf", - "Volumes": {}, - "HostConfig": { - "Binds": null, - "ContainerIDFile": "", - "LxcConf": [], - "Privileged": false, - "PortBindings": { - "80/tcp": [ - { - "HostIp": "0.0.0.0", - "HostPort": "49153" - } - ] - }, - "Links": null, - "PublishAllPorts": false, - "CgroupParent": "/mesos", - "Memory": 17179869184, - "MemorySwap": 34359738368, - "GroupAdd": ["fake", "12345"], - "OomScoreAdj": 642 - } -}` - var expected Container - err := json.Unmarshal([]byte(jsonContainer), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c678" - container, err := client.InspectContainer(id) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*container, expected) { - t.Errorf("InspectContainer(%q): Expected %#v. 
Got %#v.", id, expected, container) - } - expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/json")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - t.Errorf("InspectContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestInspectContainerWithContext(t *testing.T) { - t.Parallel() - jsonContainer := `{ - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "AppArmorProfile": "Profile", - "Created": "2013-05-07T14:51:42.087658+02:00", - "Path": "date", - "Args": [], - "Config": { - "Hostname": "4fa6e0f0c678", - "User": "", - "Memory": 17179869184, - "MemorySwap": 34359738368, - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "PortSpecs": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Image": "base", - "Volumes": {}, - "VolumesFrom": "", - "SecurityOpt": [ - "label:user:USER" - ], - "Ulimits": [ - { "Name": "nofile", "Soft": 1024, "Hard": 2048 } - ] - }, - "State": { - "Running": false, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-05-07T14:51:42.087658+02:00", - "Ghost": false - }, - "Node": { - "ID": "4I4E:QR4I:Z733:QEZK:5X44:Q4T7:W2DD:JRDY:KB2O:PODO:Z5SR:XRB6", - "IP": "192.168.99.105", - "Addra": "192.168.99.105:2376", - "Name": "node-01", - "Cpus": 4, - "Memory": 1048436736, - "Labels": { - "executiondriver": "native-0.2", - "kernelversion": "3.18.5-tinycore64", - "operatingsystem": "Boot2Docker 1.5.0 (TCL 5.4); master : a66bce5 - Tue Feb 10 23:31:27 UTC 2015", - "provider": "virtualbox", - "storagedriver": "aufs" - } - }, - "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "NetworkSettings": { - "IpAddress": "", - "IpPrefixLen": 0, - "Gateway": "", - "Bridge": "", - "PortMapping": null - }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", - "ResolvConfPath": "/etc/resolv.conf", - "Volumes": {}, - "HostConfig": { - "Binds": null, - "BlkioDeviceReadIOps": [ - { - "Path": "/dev/sdb", - "Rate": 100 - } - ], - "BlkioDeviceWriteBps": [ - { - "Path": "/dev/sdb", - "Rate": 5000 - } - ], - "ContainerIDFile": "", - "LxcConf": [], - "Privileged": false, - "PortBindings": { - "80/tcp": [ - { - "HostIp": "0.0.0.0", - "HostPort": "49153" - } - ] - }, - "Links": null, - "PublishAllPorts": false, - "CgroupParent": "/mesos", - "Memory": 17179869184, - "MemorySwap": 34359738368, - "GroupAdd": ["fake", "12345"], - "OomScoreAdj": 642 - } -}` - var expected Container - err := json.Unmarshal([]byte(jsonContainer), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c678" - - ctx, cancel := context.WithTimeout(context.TODO(), 1*time.Second) - defer cancel() - - inspectError := make(chan error) - // Invoke InspectContainer in a goroutine. The response is sent to the 'inspectError' - // channel. - go func() { - container, err := client.InspectContainer(id) - if err != nil { - inspectError <- err - return - } - if !reflect.DeepEqual(*container, expected) { - inspectError <- fmt.Errorf("inspectContainer(%q): Expected %#v. Got %#v", id, expected, container) - return - } - expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/json")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - inspectError <- fmt.Errorf("inspectContainer(%q): Wrong path in request. Want %q. 
Got %q", id, expectedURL.Path, gotPath) - return - } - // No errors to tbe reported. Send 'nil' - inspectError <- nil - }() - // Wait for either the inspect response or for the context. - select { - case err := <-inspectError: - if err != nil { - t.Fatalf("Error inspecting container with context: %v", err) - } - case <-ctx.Done(): - // Context was canceled unexpectedly. Report the same. - t.Fatalf("Context canceled when waiting for inspect container response: %v", ctx.Err()) - } -} - -func TestInspectContainerNetwork(t *testing.T) { - t.Parallel() - jsonContainer := `{ - "Id": "81e1bbe20b5508349e1c804eb08b7b6ca8366751dbea9f578b3ea0773fa66c1c", - "Created": "2015-11-12T14:54:04.791485659Z", - "Path": "consul-template", - "Args": [ - "-config=/tmp/haproxy.json", - "-consul=192.168.99.120:8500" - ], - "State": { - "Status": "running", - "Running": true, - "Paused": false, - "Restarting": false, - "OOMKilled": false, - "Dead": false, - "Pid": 3196, - "ExitCode": 0, - "Error": "", - "StartedAt": "2015-11-12T14:54:05.026747471Z", - "FinishedAt": "0001-01-01T00:00:00Z" - }, - "Image": "4921c5917fc117df3dec32f4c1976635dc6c56ccd3336fe1db3477f950e78bf7", - "ResolvConfPath": "/mnt/sda1/var/lib/docker/containers/81e1bbe20b5508349e1c804eb08b7b6ca8366751dbea9f578b3ea0773fa66c1c/resolv.conf", - "HostnamePath": "/mnt/sda1/var/lib/docker/containers/81e1bbe20b5508349e1c804eb08b7b6ca8366751dbea9f578b3ea0773fa66c1c/hostname", - "HostsPath": "/mnt/sda1/var/lib/docker/containers/81e1bbe20b5508349e1c804eb08b7b6ca8366751dbea9f578b3ea0773fa66c1c/hosts", - "LogPath": "/mnt/sda1/var/lib/docker/containers/81e1bbe20b5508349e1c804eb08b7b6ca8366751dbea9f578b3ea0773fa66c1c/81e1bbe20b5508349e1c804eb08b7b6ca8366751dbea9f578b3ea0773fa66c1c-json.log", - "Node": { - "ID": "AUIB:LFOT:3LSF:SCFS:OYDQ:NLXD:JZNE:4INI:3DRC:ZFBB:GWCY:DWJK", - "IP": "192.168.99.121", - "Addr": "192.168.99.121:2376", - "Name": "swl-demo1", - "Cpus": 1, - "Memory": 2099945472, - "Labels": { - "executiondriver": "native-0.2", - "kernelversion": "4.1.12-boot2docker", - "operatingsystem": "Boot2Docker 1.9.0 (TCL 6.4); master : 16e4a2a - Tue Nov 3 19:49:22 UTC 2015", - "provider": "virtualbox", - "storagedriver": "aufs" - } - }, - "Name": "/docker-proxy.swl-demo1", - "RestartCount": 0, - "Driver": "aufs", - "ExecDriver": "native-0.2", - "MountLabel": "", - "ProcessLabel": "", - "AppArmorProfile": "", - "ExecIDs": null, - "HostConfig": { - "Binds": null, - "ContainerIDFile": "", - "LxcConf": [], - "Memory": 0, - "MemoryReservation": 0, - "MemorySwap": 0, - "KernelMemory": 0, - "CpuShares": 0, - "CpuPeriod": 0, - "CpusetCpus": "", - "CpusetMems": "", - "CpuQuota": 0, - "BlkioWeight": 0, - "OomKillDisable": false, - "MemorySwappiness": -1, - "Privileged": false, - "PortBindings": { - "443/tcp": [ - { - "HostIp": "", - "HostPort": "443" - } - ] - }, - "Links": null, - "PublishAllPorts": false, - "Dns": null, - "DnsOptions": null, - "DnsSearch": null, - "ExtraHosts": null, - "VolumesFrom": null, - "Devices": [], - "NetworkMode": "swl-net", - "IpcMode": "", - "PidMode": "", - "UTSMode": "", - "CapAdd": null, - "CapDrop": null, - "GroupAdd": null, - "RestartPolicy": { - "Name": "no", - "MaximumRetryCount": 0 - }, - "SecurityOpt": null, - "ReadonlyRootfs": false, - "Ulimits": null, - "LogConfig": { - "Type": "json-file", - "Config": {} - }, - "CgroupParent": "", - "ConsoleSize": [ - 0, - 0 - ], - "VolumeDriver": "" - }, - "GraphDriver": { - "Name": "aufs", - "Data": null - }, - "Mounts": [], - "Config": { - "Hostname": "81e1bbe20b55", - "Domainname": "", - "User": 
"", - "AttachStdin": false, - "AttachStdout": false, - "AttachStderr": false, - "ExposedPorts": { - "443/tcp": {} - }, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": [ - "DOMAIN=local.auto", - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "CONSUL_TEMPLATE_VERSION=0.11.1" - ], - "Cmd": [ - "-consul=192.168.99.120:8500" - ], - "Image": "docker-proxy:latest", - "Volumes": null, - "WorkingDir": "", - "Entrypoint": [ - "consul-template", - "-config=/tmp/haproxy.json" - ], - "OnBuild": null, - "Labels": {}, - "StopSignal": "SIGTERM" - }, - "NetworkSettings": { - "Bridge": "", - "SandboxID": "c6b903dc5c1a96113a22dbc44709e30194079bd2d262eea1eb4f38d85821f6e1", - "HairpinMode": false, - "LinkLocalIPv6Address": "", - "LinkLocalIPv6PrefixLen": 0, - "Ports": { - "443/tcp": [ - { - "HostIp": "192.168.99.121", - "HostPort": "443" - } - ] - }, - "SandboxKey": "/var/run/docker/netns/c6b903dc5c1a", - "SecondaryIPAddresses": null, - "SecondaryIPv6Addresses": null, - "EndpointID": "", - "Gateway": "", - "GlobalIPv6Address": "", - "GlobalIPv6PrefixLen": 0, - "IPAddress": "", - "IPPrefixLen": 0, - "IPv6Gateway": "", - "MacAddress": "", - "Networks": { - "swl-net": { - "Aliases": [ - "testalias", - "81e1bbe20b55" - ], - "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", - "EndpointID": "683e3092275782a53c3b0968cc7e3a10f23264022ded9cb20490902f96fc5981", - "Gateway": "", - "IPAddress": "10.0.0.3", - "IPPrefixLen": 24, - "IPv6Gateway": "", - "GlobalIPv6Address": "", - "GlobalIPv6PrefixLen": 0, - "MacAddress": "02:42:0a:00:00:03" - } - } - } -}` - - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "81e1bbe20b55" - expIP := "10.0.0.3" - expNetworkID := "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - expectedAliases := []string{"testalias", "81e1bbe20b55"} - - container, err := client.InspectContainer(id) - if err != nil { - t.Fatal(err) - } - - s := reflect.Indirect(reflect.ValueOf(container.NetworkSettings)) - networks := s.FieldByName("Networks") - if networks.IsValid() { - var ip string - for _, net := range networks.MapKeys() { - if net.Interface().(string) == container.HostConfig.NetworkMode { - ip = networks.MapIndex(net).FieldByName("IPAddress").Interface().(string) - t.Logf("%s %v", net, ip) - } - } - if ip != expIP { - t.Errorf("InspectContainerNetworks(%q): Expected %#v. Got %#v.", id, expIP, ip) - } - - var networkID string - for _, net := range networks.MapKeys() { - if net.Interface().(string) == container.HostConfig.NetworkMode { - networkID = networks.MapIndex(net).FieldByName("NetworkID").Interface().(string) - t.Logf("%s %v", net, networkID) - } - } - - var aliases []string - for _, net := range networks.MapKeys() { - if net.Interface().(string) == container.HostConfig.NetworkMode { - aliases = networks.MapIndex(net).FieldByName("Aliases").Interface().([]string) - } - } - if !reflect.DeepEqual(aliases, expectedAliases) { - t.Errorf("InspectContainerNetworks(%q): Expected Aliases %#v. Got %#v.", id, expectedAliases, aliases) - } - - if networkID != expNetworkID { - t.Errorf("InspectContainerNetworks(%q): Expected %#v. 
Got %#v.", id, expNetworkID, networkID) - } - } else { - t.Errorf("InspectContainerNetworks(%q): No method Networks for NetworkSettings", id) - } - -} - -func TestInspectContainerNegativeSwap(t *testing.T) { - t.Parallel() - jsonContainer := `{ - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Created": "2013-05-07T14:51:42.087658+02:00", - "Path": "date", - "Args": [], - "Config": { - "Hostname": "4fa6e0f0c678", - "User": "", - "Memory": 17179869184, - "MemorySwap": -1, - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "PortSpecs": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Image": "base", - "Volumes": {}, - "VolumesFrom": "" - }, - "State": { - "Running": false, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-05-07T14:51:42.087658+02:00", - "Ghost": false - }, - "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "NetworkSettings": { - "IpAddress": "", - "IpPrefixLen": 0, - "Gateway": "", - "Bridge": "", - "PortMapping": null - }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", - "ResolvConfPath": "/etc/resolv.conf", - "Volumes": {}, - "HostConfig": { - "Binds": null, - "ContainerIDFile": "", - "LxcConf": [], - "Privileged": false, - "PortBindings": { - "80/tcp": [ - { - "HostIp": "0.0.0.0", - "HostPort": "49153" - } - ] - }, - "Links": null, - "PublishAllPorts": false - } -}` - var expected Container - err := json.Unmarshal([]byte(jsonContainer), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c678" - container, err := client.InspectContainer(id) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*container, expected) { - t.Errorf("InspectContainer(%q): Expected %#v. Got %#v.", id, expected, container) - } - expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/json")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - t.Errorf("InspectContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestInspectContainerFailure(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "server error", status: 500}) - expected := Error{Status: 500, Message: "server error"} - container, err := client.InspectContainer("abe033") - if container != nil { - t.Errorf("InspectContainer: Expected container, got %#v", container) - } - if !reflect.DeepEqual(expected, *err.(*Error)) { - t.Errorf("InspectContainer: Wrong error information. Want %#v. Got %#v.", expected, err) - } -} - -func TestInspectContainerNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such container", status: 404}) - container, err := client.InspectContainer("abe033") - if container != nil { - t.Errorf("InspectContainer: Expected container, got %#v", container) - } - expected := &NoSuchContainer{ID: "abe033"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("InspectContainer: Wrong error information. Want %#v. 
Got %#v.", expected, err) - } -} - -func TestContainerChanges(t *testing.T) { - t.Parallel() - jsonChanges := `[ - { - "Path":"/dev", - "Kind":0 - }, - { - "Path":"/dev/kmsg", - "Kind":1 - }, - { - "Path":"/test", - "Kind":1 - } -]` - var expected []Change - err := json.Unmarshal([]byte(jsonChanges), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonChanges, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c678" - changes, err := client.ContainerChanges(id) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(changes, expected) { - t.Errorf("ContainerChanges(%q): Expected %#v. Got %#v.", id, expected, changes) - } - expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/changes")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - t.Errorf("ContainerChanges(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestContainerChangesFailure(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "server error", status: 500}) - expected := Error{Status: 500, Message: "server error"} - changes, err := client.ContainerChanges("abe033") - if changes != nil { - t.Errorf("ContainerChanges: Expected changes, got %#v", changes) - } - if !reflect.DeepEqual(expected, *err.(*Error)) { - t.Errorf("ContainerChanges: Wrong error information. Want %#v. Got %#v.", expected, err) - } -} - -func TestContainerChangesNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such container", status: 404}) - changes, err := client.ContainerChanges("abe033") - if changes != nil { - t.Errorf("ContainerChanges: Expected changes, got %#v", changes) - } - expected := &NoSuchContainer{ID: "abe033"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("ContainerChanges: Wrong error information. Want %#v. Got %#v.", expected, err) - } -} - -func TestCreateContainer(t *testing.T) { - t.Parallel() - jsonContainer := `{ - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Warnings": [] -}` - var expected Container - err := json.Unmarshal([]byte(jsonContainer), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - client := newTestClient(fakeRT) - config := Config{AttachStdout: true, AttachStdin: true} - opts := CreateContainerOptions{Name: "TestCreateContainer", Config: &config} - container, err := client.CreateContainer(opts) - if err != nil { - t.Fatal(err) - } - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - if container.ID != id { - t.Errorf("CreateContainer: wrong ID. Want %q. Got %q.", id, container.ID) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("CreateContainer: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/create")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("CreateContainer: Wrong path in request. Want %q. 
Got %q.", expectedURL.Path, gotPath) - } - var gotBody Config - err = json.NewDecoder(req.Body).Decode(&gotBody) - if err != nil { - t.Fatal(err) - } -} - -func TestCreateContainerImageNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "No such image", status: http.StatusNotFound}) - config := Config{AttachStdout: true, AttachStdin: true} - container, err := client.CreateContainer(CreateContainerOptions{Config: &config}) - if container != nil { - t.Errorf("CreateContainer: expected container, got %#v.", container) - } - if !reflect.DeepEqual(err, ErrNoSuchImage) { - t.Errorf("CreateContainer: Wrong error type. Want %#v. Got %#v.", ErrNoSuchImage, err) - } -} - -func TestCreateContainerDuplicateName(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "No such image", status: http.StatusConflict}) - config := Config{AttachStdout: true, AttachStdin: true} - container, err := client.CreateContainer(CreateContainerOptions{Config: &config}) - if container != nil { - t.Errorf("CreateContainer: expected container, got %#v.", container) - } - if err != ErrContainerAlreadyExists { - t.Errorf("CreateContainer: Wrong error type. Want %#v. Got %#v.", ErrContainerAlreadyExists, err) - } -} - -// Workaround for 17.09 bug returning 400 instead of 409. -// See https://github.com/moby/moby/issues/35021 -func TestCreateContainerDuplicateNameWorkaroundDocker17_09(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: `{"message":"Conflict. The container name \"/c1\" is already in use by container \"2ce137e165dfca5e087f247b5d05a2311f91ef3da4bb7772168446a1a47e2f68\". You have to remove (or rename) that container to be able to reuse that name."}`, status: http.StatusBadRequest}) - config := Config{AttachStdout: true, AttachStdin: true} - container, err := client.CreateContainer(CreateContainerOptions{Config: &config}) - if container != nil { - t.Errorf("CreateContainer: expected container, got %#v.", container) - } - if err != ErrContainerAlreadyExists { - t.Errorf("CreateContainer: Wrong error type. Want %#v. Got %#v.", ErrContainerAlreadyExists, err) - } -} - -func TestCreateContainerWithHostConfig(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "{}", status: http.StatusOK} - client := newTestClient(fakeRT) - config := Config{} - hostConfig := HostConfig{PublishAllPorts: true} - opts := CreateContainerOptions{Name: "TestCreateContainerWithHostConfig", Config: &config, HostConfig: &hostConfig} - _, err := client.CreateContainer(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - var gotBody map[string]interface{} - err = json.NewDecoder(req.Body).Decode(&gotBody) - if err != nil { - t.Fatal(err) - } - if _, ok := gotBody["HostConfig"]; !ok { - t.Errorf("CreateContainer: wrong body. HostConfig was not serialized") - } -} - -func TestUpdateContainer(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - update := UpdateContainerOptions{Memory: 12345, CpusetMems: "0,1"} - err := client.UpdateContainer(id, update) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("UpdateContainer: wrong HTTP method. Want %q. 
Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/update")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("UpdateContainer: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) - } - expectedContentType := "application/json" - if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { - t.Errorf("UpdateContainer: Wrong content-type in request. Want %q. Got %q.", expectedContentType, contentType) - } - var out UpdateContainerOptions - if err := json.NewDecoder(req.Body).Decode(&out); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(out, update) { - t.Errorf("UpdateContainer: wrong body, got: %#v, want %#v", out, update) - } -} - -func TestStartContainer(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.StartContainer(id, &HostConfig{}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("StartContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/start")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("StartContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } - expectedContentType := "application/json" - if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { - t.Errorf("StartContainer(%q): Wrong content-type in request. Want %q. Got %q.", id, expectedContentType, contentType) - } -} - -func TestStartContainerHostConfigAPI124(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - client.serverAPIVersion = apiVersion124 - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.StartContainer(id, &HostConfig{}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("StartContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/start")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("StartContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } - notAcceptedContentType := "application/json" - if contentType := req.Header.Get("Content-Type"); contentType == notAcceptedContentType { - t.Errorf("StartContainer(%q): Unepected %q Content-Type in request.", id, contentType) - } - if req.Body != nil { - data, _ := ioutil.ReadAll(req.Body) - t.Errorf("StartContainer(%q): Unexpected data sent: %s", id, data) - } -} - -func TestStartContainerNilHostConfig(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.StartContainer(id, nil) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("StartContainer(%q): wrong HTTP method. Want %q. 
Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/start")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("StartContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } - expectedContentType := "application/json" - if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { - t.Errorf("StartContainer(%q): Wrong content-type in request. Want %q. Got %q.", id, expectedContentType, contentType) - } - var buf [4]byte - req.Body.Read(buf[:]) - if string(buf[:]) != "null" { - t.Errorf("Startcontainer(%q): Wrong body. Want null. Got %s", id, buf[:]) - } -} - -func TestStartContainerWithContext(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - - ctx, cancel := context.WithTimeout(context.TODO(), 1*time.Second) - defer cancel() - - startError := make(chan error) - go func() { - startError <- client.StartContainerWithContext(id, &HostConfig{}, ctx) - }() - select { - case err := <-startError: - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("StartContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/start")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("StartContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } - expectedContentType := "application/json" - if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { - t.Errorf("StartContainer(%q): Wrong content-type in request. Want %q. Got %q.", id, expectedContentType, contentType) - } - case <-ctx.Done(): - // Context was canceled unexpectedly. Report the same. - t.Fatalf("Context canceled when waiting for start container response: %v", ctx.Err()) - } -} - -func TestStartContainerNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.StartContainer("a2344", &HostConfig{}) - expected := &NoSuchContainer{ID: "a2344", Err: err.(*NoSuchContainer).Err} - if !reflect.DeepEqual(err, expected) { - t.Errorf("StartContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestStartContainerAlreadyRunning(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "container already running", status: http.StatusNotModified}) - err := client.StartContainer("a2334", &HostConfig{}) - expected := &ContainerAlreadyRunning{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("StartContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestStopContainer(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.StopContainer(id, 10) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("StopContainer(%q, 10): wrong HTTP method. Want %q. 
Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/stop")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("StopContainer(%q, 10): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestStopContainerWithContext(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - - ctx, cancel := context.WithTimeout(context.TODO(), 1*time.Second) - defer cancel() - - stopError := make(chan error) - go func() { - stopError <- client.StopContainerWithContext(id, 10, ctx) - }() - select { - case err := <-stopError: - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("StopContainer(%q, 10): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/stop")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("StopContainer(%q, 10): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } - case <-ctx.Done(): - // Context was canceled unexpectedly. Report the same. - t.Fatalf("Context canceled when waiting for stop container response: %v", ctx.Err()) - } -} - -func TestStopContainerNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.StopContainer("a2334", 10) - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("StopContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestStopContainerNotRunning(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "container not running", status: http.StatusNotModified}) - err := client.StopContainer("a2334", 10) - expected := &ContainerNotRunning{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("StopContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestRestartContainer(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.RestartContainer(id, 10) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("RestartContainer(%q, 10): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/restart")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("RestartContainer(%q, 10): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestRestartContainerNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.RestartContainer("a2334", 10) - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("RestartContainer: Wrong error returned. Want %#v. 
Got %#v.", expected, err) - } -} - -func TestPauseContainer(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.PauseContainer(id) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("PauseContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/pause")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("PauseContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestPauseContainerNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.PauseContainer("a2334") - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("PauseContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestUnpauseContainer(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.UnpauseContainer(id) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("PauseContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/unpause")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("PauseContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestUnpauseContainerNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.UnpauseContainer("a2334") - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("PauseContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestKillContainer(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.KillContainer(KillContainerOptions{ID: id}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("KillContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/kill")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("KillContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestKillContainerSignal(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.KillContainer(KillContainerOptions{ID: id, Signal: SIGTERM}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("KillContainer(%q): wrong HTTP method. Want %q. 
Got %q.", id, "POST", req.Method) - } - if signal := req.URL.Query().Get("signal"); signal != "15" { - t.Errorf("KillContainer(%q): Wrong query string in request. Want %q. Got %q.", id, "15", signal) - } -} - -func TestKillContainerNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.KillContainer(KillContainerOptions{ID: "a2334"}) - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("KillContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestKillContainerNotRunning(t *testing.T) { - t.Parallel() - id := "abcd1234567890" - msg := fmt.Sprintf("Cannot kill container: %[1]s: Container %[1]s is not running", id) - client := newTestClient(&FakeRoundTripper{message: msg, status: http.StatusConflict}) - err := client.KillContainer(KillContainerOptions{ID: id}) - expected := &ContainerNotRunning{ID: id} - if !reflect.DeepEqual(err, expected) { - t.Errorf("KillContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestRemoveContainer(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - opts := RemoveContainerOptions{ID: id} - err := client.RemoveContainer(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "DELETE" { - t.Errorf("RemoveContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "DELETE", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id)) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("RemoveContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestRemoveContainerRemoveVolumes(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - opts := RemoveContainerOptions{ID: id, RemoveVolumes: true} - err := client.RemoveContainer(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - params := map[string][]string(req.URL.Query()) - expected := map[string][]string{"v": {"1"}} - if !reflect.DeepEqual(params, expected) { - t.Errorf("RemoveContainer(%q): wrong parameters. Want %#v. Got %#v.", id, expected, params) - } -} - -func TestRemoveContainerNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.RemoveContainer(RemoveContainerOptions{ID: "a2334"}) - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("RemoveContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestResizeContainerTTY(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.ResizeContainerTTY(id, 40, 80) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("ResizeContainerTTY(%q): wrong HTTP method. Want %q. 
Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/resize")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("ResizeContainerTTY(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } - got := map[string][]string(req.URL.Query()) - expectedParams := map[string][]string{ - "w": {"80"}, - "h": {"40"}, - } - if !reflect.DeepEqual(got, expectedParams) { - t.Errorf("Expected %#v, got %#v.", expectedParams, got) - } -} - -func TestWaitContainer(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: `{"StatusCode": 56}`, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - status, err := client.WaitContainer(id) - if err != nil { - t.Fatal(err) - } - if status != 56 { - t.Errorf("WaitContainer(%q): wrong return. Want 56. Got %d.", id, status) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("WaitContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/wait")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("WaitContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestWaitContainerWithContext(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: `{"StatusCode": 56}`, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - - ctx, cancel := context.WithTimeout(context.TODO(), 1*time.Second) - defer cancel() - - var status int - waitError := make(chan error) - go func() { - var err error - status, err = client.WaitContainerWithContext(id, ctx) - waitError <- err - }() - select { - case err := <-waitError: - if err != nil { - t.Fatal(err) - } - if status != 56 { - t.Errorf("WaitContainer(%q): wrong return. Want 56. Got %d.", id, status) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("WaitContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/wait")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("WaitContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } - case <-ctx.Done(): - // Context was canceled unexpectedly. Report the same. - t.Fatalf("Context canceled when waiting for wait container response: %v", ctx.Err()) - } -} - -func TestWaitContainerNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - _, err := client.WaitContainer("a2334") - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("WaitContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestCommitContainer(t *testing.T) { - t.Parallel() - response := `{"Id":"596069db4bf5"}` - client := newTestClient(&FakeRoundTripper{message: response, status: http.StatusOK}) - id := "596069db4bf5" - image, err := client.CommitContainer(CommitContainerOptions{}) - if err != nil { - t.Fatal(err) - } - if image.ID != id { - t.Errorf("CommitContainer: Wrong image id. Want %q. 
Got %q.", id, image.ID) - } -} - -func TestCommitContainerParams(t *testing.T) { - t.Parallel() - cfg := Config{Memory: 67108864} - json, _ := json.Marshal(&cfg) - var tests = []struct { - input CommitContainerOptions - params map[string][]string - body []byte - }{ - {CommitContainerOptions{}, map[string][]string{}, nil}, - {CommitContainerOptions{Container: "44c004db4b17"}, map[string][]string{"container": {"44c004db4b17"}}, nil}, - { - CommitContainerOptions{Container: "44c004db4b17", Repository: "tsuru/python", Message: "something"}, - map[string][]string{"container": {"44c004db4b17"}, "repo": {"tsuru/python"}, "comment": {"something"}}, - nil, - }, - { - CommitContainerOptions{Container: "44c004db4b17", Run: &cfg}, - map[string][]string{"container": {"44c004db4b17"}}, - json, - }, - } - fakeRT := &FakeRoundTripper{message: "{}", status: http.StatusOK} - client := newTestClient(fakeRT) - u, _ := url.Parse(client.getURL("/commit")) - for _, tt := range tests { - if _, err := client.CommitContainer(tt.input); err != nil { - t.Error(err) - } - got := map[string][]string(fakeRT.requests[0].URL.Query()) - if !reflect.DeepEqual(got, tt.params) { - t.Errorf("Expected %#v, got %#v.", tt.params, got) - } - if path := fakeRT.requests[0].URL.Path; path != u.Path { - t.Errorf("Wrong path on request. Want %q. Got %q.", u.Path, path) - } - if meth := fakeRT.requests[0].Method; meth != "POST" { - t.Errorf("Wrong HTTP method. Want POST. Got %s.", meth) - } - if tt.body != nil { - if requestBody, err := ioutil.ReadAll(fakeRT.requests[0].Body); err == nil { - if !bytes.Equal(requestBody, tt.body) { - t.Errorf("Expected body %#v, got %#v", tt.body, requestBody) - } - } else { - t.Errorf("Error reading request body: %#v", err) - } - } - fakeRT.Reset() - } -} - -func TestCommitContainerFailure(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusInternalServerError}) - _, err := client.CommitContainer(CommitContainerOptions{}) - if err == nil { - t.Error("Expected non-nil error, got .") - } -} - -func TestCommitContainerNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - _, err := client.CommitContainer(CommitContainerOptions{}) - expected := &NoSuchContainer{ID: ""} - if !reflect.DeepEqual(err, expected) { - t.Errorf("CommitContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestAttachToContainerLogs(t *testing.T) { - t.Parallel() - var req http.Request - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 19}) - w.Write([]byte("something happened!")) - req = *r - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var buf bytes.Buffer - opts := AttachToContainerOptions{ - Container: "a123456", - OutputStream: &buf, - Stdout: true, - Stderr: true, - Logs: true, - } - err := client.AttachToContainer(opts) - if err != nil { - t.Fatal(err) - } - expected := "something happened!" - if buf.String() != expected { - t.Errorf("AttachToContainer for logs: wrong output. Want %q. Got %q.", expected, buf.String()) - } - if req.Method != "POST" { - t.Errorf("AttachToContainer: wrong HTTP method. Want POST. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/containers/a123456/attach")) - if req.URL.Path != u.Path { - t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. 
Got %q.", u.Path, req.URL.Path) - } - expectedQs := map[string][]string{ - "logs": {"1"}, - "stdout": {"1"}, - "stderr": {"1"}, - } - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expectedQs) { - t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expectedQs, got) - } -} - -func TestAttachToContainer(t *testing.T) { - t.Parallel() - var reader = strings.NewReader("send value") - var req http.Request - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) - w.Write([]byte("hello")) - req = *r - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var stdout, stderr bytes.Buffer - opts := AttachToContainerOptions{ - Container: "a123456", - OutputStream: &stdout, - ErrorStream: &stderr, - InputStream: reader, - Stdin: true, - Stdout: true, - Stderr: true, - Stream: true, - RawTerminal: true, - } - err := client.AttachToContainer(opts) - if err != nil { - t.Fatal(err) - } - expected := map[string][]string{ - "stdin": {"1"}, - "stdout": {"1"}, - "stderr": {"1"}, - "stream": {"1"}, - } - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestAttachToContainerSentinel(t *testing.T) { - t.Parallel() - var reader = strings.NewReader("send value") - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) - w.Write([]byte("hello")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var stdout, stderr bytes.Buffer - success := make(chan struct{}) - opts := AttachToContainerOptions{ - Container: "a123456", - OutputStream: &stdout, - ErrorStream: &stderr, - InputStream: reader, - Stdin: true, - Stdout: true, - Stderr: true, - Stream: true, - RawTerminal: true, - Success: success, - } - errCh := make(chan error) - go func() { - errCh <- client.AttachToContainer(opts) - }() - success <- <-success - if err := <-errCh; err != nil { - t.Error(err) - } -} - -func TestAttachToContainerNilStdout(t *testing.T) { - t.Parallel() - var reader = strings.NewReader("send value") - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) - w.Write([]byte("hello")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var stderr bytes.Buffer - opts := AttachToContainerOptions{ - Container: "a123456", - OutputStream: nil, - ErrorStream: &stderr, - InputStream: reader, - Stdin: true, - Stdout: true, - Stderr: true, - Stream: true, - RawTerminal: true, - } - err := client.AttachToContainer(opts) - if err != nil { - t.Fatal(err) - } -} - -func TestAttachToContainerNilStderr(t *testing.T) { - t.Parallel() - var reader = strings.NewReader("send value") - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) - w.Write([]byte("hello")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var stdout bytes.Buffer - opts := AttachToContainerOptions{ - Container: "a123456", - OutputStream: &stdout, - InputStream: reader, - Stdin: true, - Stdout: true, - Stderr: true, - Stream: true, - RawTerminal: true, - } - err := 
client.AttachToContainer(opts) - if err != nil { - t.Fatal(err) - } -} - -func TestAttachToContainerStdinOnly(t *testing.T) { - t.Parallel() - var reader = strings.NewReader("send value") - serverFinished := make(chan struct{}) - clientFinished := make(chan struct{}) - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - hj, ok := w.(http.Hijacker) - if !ok { - t.Fatal("cannot hijack server connection") - } - conn, _, err := hj.Hijack() - if err != nil { - t.Fatal(err) - } - // wait for client to indicate it's finished - <-clientFinished - // inform test that the server has finished - close(serverFinished) - conn.Close() - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - success := make(chan struct{}) - opts := AttachToContainerOptions{ - Container: "a123456", - InputStream: reader, - Stdin: true, - Stdout: false, - Stderr: false, - Stream: true, - RawTerminal: false, - Success: success, - } - go func() { - if err := client.AttachToContainer(opts); err != nil { - t.Error(err) - } - // client's attach session is over - close(clientFinished) - }() - success <- <-success - // wait for server to finish handling attach - <-serverFinished -} - -func TestAttachToContainerRawTerminalFalse(t *testing.T) { - t.Parallel() - input := strings.NewReader("send value") - var req http.Request - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - req = *r - w.WriteHeader(http.StatusOK) - hj, ok := w.(http.Hijacker) - if !ok { - t.Fatal("cannot hijack server connection") - } - conn, _, err := hj.Hijack() - if err != nil { - t.Fatal(err) - } - conn.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) - conn.Write([]byte("hello")) - conn.Write([]byte{2, 0, 0, 0, 0, 0, 0, 6}) - conn.Write([]byte("hello!")) - time.Sleep(10 * time.Millisecond) - conn.Close() - })) - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var stdout, stderr bytes.Buffer - opts := AttachToContainerOptions{ - Container: "a123456", - OutputStream: &stdout, - ErrorStream: &stderr, - InputStream: input, - Stdin: true, - Stdout: true, - Stderr: true, - Stream: true, - RawTerminal: false, - } - client.AttachToContainer(opts) - expected := map[string][]string{ - "stdin": {"1"}, - "stdout": {"1"}, - "stderr": {"1"}, - "stream": {"1"}, - } - got := map[string][]string(req.URL.Query()) - server.Close() - if !reflect.DeepEqual(got, expected) { - t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expected, got) - } - if stdout.String() != "hello" { - t.Errorf("AttachToContainer: wrong content written to stdout. Want %q. Got %q.", "hello", stdout.String()) - } - if stderr.String() != "hello!" { - t.Errorf("AttachToContainer: wrong content written to stderr. Want %q. Got %q.", "hello!", stderr.String()) - } -} - -func TestAttachToContainerWithoutContainer(t *testing.T) { - t.Parallel() - var client Client - err := client.AttachToContainer(AttachToContainerOptions{}) - expected := &NoSuchContainer{ID: ""} - if !reflect.DeepEqual(err, expected) { - t.Errorf("AttachToContainer: wrong error. Want %#v. 
Got %#v.", expected, err) - } -} - -func TestLogs(t *testing.T) { - t.Parallel() - var req http.Request - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} - w.Write(prefix) - w.Write([]byte("something happened!")) - req = *r - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var buf bytes.Buffer - opts := LogsOptions{ - Container: "a123456", - OutputStream: &buf, - Follow: true, - Stdout: true, - Stderr: true, - Timestamps: true, - } - err := client.Logs(opts) - if err != nil { - t.Fatal(err) - } - expected := "something happened!" - if buf.String() != expected { - t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) - } - if req.Method != "GET" { - t.Errorf("Logs: wrong HTTP method. Want GET. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/containers/a123456/logs")) - if req.URL.Path != u.Path { - t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path) - } - expectedQs := map[string][]string{ - "follow": {"1"}, - "stdout": {"1"}, - "stderr": {"1"}, - "timestamps": {"1"}, - "tail": {"all"}, - } - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expectedQs) { - t.Errorf("Logs: wrong query string. Want %#v. Got %#v.", expectedQs, got) - } -} - -func TestLogsNilStdoutDoesntFail(t *testing.T) { - t.Parallel() - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} - w.Write(prefix) - w.Write([]byte("something happened!")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - opts := LogsOptions{ - Container: "a123456", - Follow: true, - Stdout: true, - Stderr: true, - Timestamps: true, - } - err := client.Logs(opts) - if err != nil { - t.Fatal(err) - } -} - -func TestLogsNilStderrDoesntFail(t *testing.T) { - t.Parallel() - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - prefix := []byte{2, 0, 0, 0, 0, 0, 0, 19} - w.Write(prefix) - w.Write([]byte("something happened!")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - opts := LogsOptions{ - Container: "a123456", - Follow: true, - Stdout: true, - Stderr: true, - Timestamps: true, - } - err := client.Logs(opts) - if err != nil { - t.Fatal(err) - } -} - -func TestLogsSpecifyingTail(t *testing.T) { - t.Parallel() - var req http.Request - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} - w.Write(prefix) - w.Write([]byte("something happened!")) - req = *r - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var buf bytes.Buffer - opts := LogsOptions{ - Container: "a123456", - OutputStream: &buf, - Follow: true, - Stdout: true, - Stderr: true, - Timestamps: true, - Tail: "100", - } - err := client.Logs(opts) - if err != nil { - t.Fatal(err) - } - expected := "something happened!" - if buf.String() != expected { - t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) - } - if req.Method != "GET" { - t.Errorf("Logs: wrong HTTP method. Want GET. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/containers/a123456/logs")) - if req.URL.Path != u.Path { - t.Errorf("AttachToContainer for logs: wrong HTTP path. 
Want %q. Got %q.", u.Path, req.URL.Path) - } - expectedQs := map[string][]string{ - "follow": {"1"}, - "stdout": {"1"}, - "stderr": {"1"}, - "timestamps": {"1"}, - "tail": {"100"}, - } - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expectedQs) { - t.Errorf("Logs: wrong query string. Want %#v. Got %#v.", expectedQs, got) - } -} - -func TestLogsRawTerminal(t *testing.T) { - t.Parallel() - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("something happened!")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var buf bytes.Buffer - opts := LogsOptions{ - Container: "a123456", - OutputStream: &buf, - Follow: true, - RawTerminal: true, - Stdout: true, - Stderr: true, - Timestamps: true, - Tail: "100", - } - err := client.Logs(opts) - if err != nil { - t.Fatal(err) - } - expected := "something happened!" - if buf.String() != expected { - t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) - } -} - -func TestLogsNoContainer(t *testing.T) { - t.Parallel() - var client Client - err := client.Logs(LogsOptions{}) - expected := &NoSuchContainer{ID: ""} - if !reflect.DeepEqual(err, expected) { - t.Errorf("AttachToContainer: wrong error. Want %#v. Got %#v.", expected, err) - } -} - -func TestNoSuchContainerError(t *testing.T) { - t.Parallel() - var err = &NoSuchContainer{ID: "i345"} - expected := "No such container: i345" - if got := err.Error(); got != expected { - t.Errorf("NoSuchContainer: wrong message. Want %q. Got %q.", expected, got) - } -} - -func TestNoSuchContainerErrorMessage(t *testing.T) { - t.Parallel() - var err = &NoSuchContainer{ID: "i345", Err: errors.New("some advanced error info")} - expected := "some advanced error info" - if got := err.Error(); got != expected { - t.Errorf("NoSuchContainer: wrong message. Want %q. Got %q.", expected, got) - } -} - -func TestExportContainer(t *testing.T) { - t.Parallel() - content := "exported container tar content" - out := stdoutMock{bytes.NewBufferString(content)} - client := newTestClient(&FakeRoundTripper{status: http.StatusOK}) - opts := ExportContainerOptions{ID: "4fa6e0f0c678", OutputStream: out} - err := client.ExportContainer(opts) - if err != nil { - t.Errorf("ExportContainer: caught error %#v while exporting container, expected nil", err.Error()) - } - if out.String() != content { - t.Errorf("ExportContainer: wrong stdout. Want %#v. Got %#v.", content, out.String()) - } -} - -func runStreamConnServer(t *testing.T, network, laddr string, listening chan<- string, done chan<- int, containerID string) { - defer close(done) - l, err := net.Listen(network, laddr) - if err != nil { - t.Errorf("Listen(%q, %q) failed: %v", network, laddr, err) - listening <- "" - return - } - defer l.Close() - listening <- l.Addr().String() - c, err := l.Accept() - if err != nil { - t.Logf("Accept failed: %v", err) - return - } - defer c.Close() - breader := bufio.NewReader(c) - req, err := http.ReadRequest(breader) - if err != nil { - t.Fatal(err) - } - if path := "/containers/" + containerID + "/export"; req.URL.Path != path { - t.Errorf("wrong path. Want %q. Got %q", path, req.URL.Path) - return - } - c.Write([]byte("HTTP/1.1 200 OK\n\nexported container tar content")) -} - -func tempfile(filename string) string { - return os.TempDir() + "/" + filename + "." 
+ strconv.Itoa(os.Getpid()) -} - -func TestExportContainerNoId(t *testing.T) { - t.Parallel() - client := Client{} - out := stdoutMock{bytes.NewBufferString("")} - err := client.ExportContainer(ExportContainerOptions{OutputStream: out}) - e, ok := err.(*NoSuchContainer) - if !ok { - t.Errorf("ExportContainer: wrong error. Want NoSuchContainer. Got %#v.", e) - } - if e.ID != "" { - t.Errorf("ExportContainer: wrong ID. Want %q. Got %q", "", e.ID) - } -} - -func TestUploadToContainer(t *testing.T) { - t.Parallel() - content := "File content" - in := stdinMock{bytes.NewBufferString(content)} - fakeRT := &FakeRoundTripper{status: http.StatusOK} - client := newTestClient(fakeRT) - opts := UploadToContainerOptions{ - Path: "abc", - InputStream: in, - } - err := client.UploadToContainer("a123456", opts) - if err != nil { - t.Errorf("UploadToContainer: caught error %#v while uploading archive to container, expected nil", err) - } - - req := fakeRT.requests[0] - - if req.Method != "PUT" { - t.Errorf("UploadToContainer{Path:abc}: Wrong HTTP method. Want PUT. Got %s", req.Method) - } - - if pathParam := req.URL.Query().Get("path"); pathParam != "abc" { - t.Errorf("ListImages({Path:abc}): Wrong parameter. Want path=abc. Got path=%s", pathParam) - } - -} - -func TestDownloadFromContainer(t *testing.T) { - t.Parallel() - filecontent := "File content" - client := newTestClient(&FakeRoundTripper{message: filecontent, status: http.StatusOK}) - - var out bytes.Buffer - opts := DownloadFromContainerOptions{ - OutputStream: &out, - } - err := client.DownloadFromContainer("a123456", opts) - if err != nil { - t.Errorf("DownloadFromContainer: caught error %#v while downloading from container, expected nil", err.Error()) - } - if out.String() != filecontent { - t.Errorf("DownloadFromContainer: wrong stdout. Want %#v. Got %#v.", filecontent, out.String()) - } -} - -func TestCopyFromContainer(t *testing.T) { - t.Parallel() - content := "File content" - out := stdoutMock{bytes.NewBufferString(content)} - client := newTestClient(&FakeRoundTripper{status: http.StatusOK}) - opts := CopyFromContainerOptions{ - Container: "a123456", - OutputStream: &out, - } - err := client.CopyFromContainer(opts) - if err != nil { - t.Errorf("CopyFromContainer: caught error %#v while copying from container, expected nil", err.Error()) - } - if out.String() != content { - t.Errorf("CopyFromContainer: wrong stdout. Want %#v. Got %#v.", content, out.String()) - } -} - -func TestCopyFromContainerEmptyContainer(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{status: http.StatusOK}) - err := client.CopyFromContainer(CopyFromContainerOptions{}) - _, ok := err.(*NoSuchContainer) - if !ok { - t.Errorf("CopyFromContainer: invalid error returned. 
Want NoSuchContainer, got %#v.", err) - } -} - -func TestCopyFromContainerDockerAPI124(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{status: http.StatusOK}) - client.serverAPIVersion = apiVersion124 - opts := CopyFromContainerOptions{ - Container: "a123456", - } - err := client.CopyFromContainer(opts) - if err == nil { - t.Fatal("got unexpected error") - } - expectedMsg := "go-dockerclient: CopyFromContainer is no longer available in Docker >= 1.12, use DownloadFromContainer instead" - if err.Error() != expectedMsg { - t.Errorf("wrong error message\nWant %q\nGot %q", expectedMsg, err.Error()) - } -} - -func TestPassingNameOptToCreateContainerReturnsItInContainer(t *testing.T) { - t.Parallel() - jsonContainer := `{ - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Warnings": [] -}` - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - client := newTestClient(fakeRT) - config := Config{AttachStdout: true, AttachStdin: true} - opts := CreateContainerOptions{Name: "TestCreateContainer", Config: &config} - container, err := client.CreateContainer(opts) - if err != nil { - t.Fatal(err) - } - if container.Name != "TestCreateContainer" { - t.Errorf("Container name expected to be TestCreateContainer, was %s", container.Name) - } -} - -func TestAlwaysRestart(t *testing.T) { - t.Parallel() - policy := AlwaysRestart() - if policy.Name != "always" { - t.Errorf("AlwaysRestart(): wrong policy name. Want %q. Got %q", "always", policy.Name) - } - if policy.MaximumRetryCount != 0 { - t.Errorf("AlwaysRestart(): wrong MaximumRetryCount. Want 0. Got %d", policy.MaximumRetryCount) - } -} - -func TestRestartOnFailure(t *testing.T) { - t.Parallel() - const retry = 5 - policy := RestartOnFailure(retry) - if policy.Name != "on-failure" { - t.Errorf("RestartOnFailure(%d): wrong policy name. Want %q. Got %q", retry, "on-failure", policy.Name) - } - if policy.MaximumRetryCount != retry { - t.Errorf("RestartOnFailure(%d): wrong MaximumRetryCount. Want %d. Got %d", retry, retry, policy.MaximumRetryCount) - } -} - -func TestRestartUnlessStopped(t *testing.T) { - t.Parallel() - policy := RestartUnlessStopped() - if policy.Name != "unless-stopped" { - t.Errorf("RestartUnlessStopped(): wrong policy name. Want %q. Got %q", "unless-stopped", policy.Name) - } - if policy.MaximumRetryCount != 0 { - t.Errorf("RestartUnlessStopped(): wrong MaximumRetryCount. Want 0. Got %d", policy.MaximumRetryCount) - } -} - -func TestNeverRestart(t *testing.T) { - t.Parallel() - policy := NeverRestart() - if policy.Name != "no" { - t.Errorf("NeverRestart(): wrong policy name. Want %q. Got %q", "always", policy.Name) - } - if policy.MaximumRetryCount != 0 { - t.Errorf("NeverRestart(): wrong MaximumRetryCount. Want 0. 
Got %d", policy.MaximumRetryCount) - } -} - -func TestTopContainer(t *testing.T) { - t.Parallel() - jsonTop := `{ - "Processes": [ - [ - "ubuntu", - "3087", - "815", - "0", - "01:44", - "?", - "00:00:00", - "cmd1" - ], - [ - "root", - "3158", - "3087", - "0", - "01:44", - "?", - "00:00:01", - "cmd2" - ] - ], - "Titles": [ - "UID", - "PID", - "PPID", - "C", - "STIME", - "TTY", - "TIME", - "CMD" - ] -}` - var expected TopResult - err := json.Unmarshal([]byte(jsonTop), &expected) - if err != nil { - t.Fatal(err) - } - id := "4fa6e0f0" - fakeRT := &FakeRoundTripper{message: jsonTop, status: http.StatusOK} - client := newTestClient(fakeRT) - processes, err := client.TopContainer(id, "") - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(processes, expected) { - t.Errorf("TopContainer: Expected %#v. Got %#v.", expected, processes) - } - if len(processes.Processes) != 2 || len(processes.Processes[0]) != 8 || - processes.Processes[0][7] != "cmd1" { - t.Errorf("TopContainer: Process list to include cmd1. Got %#v.", processes) - } - expectedURI := "/containers/" + id + "/top" - if !strings.HasSuffix(fakeRT.requests[0].URL.String(), expectedURI) { - t.Errorf("TopContainer: Expected URI to have %q. Got %q.", expectedURI, fakeRT.requests[0].URL.String()) - } -} - -func TestTopContainerNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - _, err := client.TopContainer("abef348", "") - expected := &NoSuchContainer{ID: "abef348"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("TopContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestTopContainerWithPsArgs(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "no such container", status: http.StatusNotFound} - client := newTestClient(fakeRT) - expectedErr := &NoSuchContainer{ID: "abef348"} - if _, err := client.TopContainer("abef348", "aux"); !reflect.DeepEqual(expectedErr, err) { - t.Errorf("TopContainer: Expected %v. Got %v.", expectedErr, err) - } - expectedURI := "/containers/abef348/top?ps_args=aux" - if !strings.HasSuffix(fakeRT.requests[0].URL.String(), expectedURI) { - t.Errorf("TopContainer: Expected URI to have %q. 
Got %q.", expectedURI, fakeRT.requests[0].URL.String()) - } -} - -func TestStats(t *testing.T) { - t.Parallel() - jsonStats1 := `{ - "read" : "2015-01-08T22:57:31.547920715Z", - "network" : { - "rx_dropped" : 0, - "rx_bytes" : 648, - "rx_errors" : 0, - "tx_packets" : 8, - "tx_dropped" : 0, - "rx_packets" : 8, - "tx_errors" : 0, - "tx_bytes" : 648 - }, - "networks" : { - "eth0":{ - "rx_dropped" : 0, - "rx_bytes" : 648, - "rx_errors" : 0, - "tx_packets" : 8, - "tx_dropped" : 0, - "rx_packets" : 8, - "tx_errors" : 0, - "tx_bytes" : 648 - } - }, - "memory_stats" : { - "stats" : { - "total_pgmajfault" : 0, - "cache" : 0, - "mapped_file" : 0, - "total_inactive_file" : 0, - "pgpgout" : 414, - "rss" : 6537216, - "total_mapped_file" : 0, - "writeback" : 0, - "unevictable" : 0, - "pgpgin" : 477, - "total_unevictable" : 0, - "pgmajfault" : 0, - "total_rss" : 6537216, - "total_rss_huge" : 6291456, - "total_writeback" : 0, - "total_inactive_anon" : 0, - "rss_huge" : 6291456, - "hierarchical_memory_limit": 189204833, - "total_pgfault" : 964, - "total_active_file" : 0, - "active_anon" : 6537216, - "total_active_anon" : 6537216, - "total_pgpgout" : 414, - "total_cache" : 0, - "inactive_anon" : 0, - "active_file" : 0, - "pgfault" : 964, - "inactive_file" : 0, - "total_pgpgin" : 477, - "swap" : 47312896, - "hierarchical_memsw_limit" : 1610612736 - }, - "max_usage" : 6651904, - "usage" : 6537216, - "failcnt" : 0, - "limit" : 67108864 - }, - "blkio_stats": { - "io_service_bytes_recursive": [ - { - "major": 8, - "minor": 0, - "op": "Read", - "value": 428795731968 - }, - { - "major": 8, - "minor": 0, - "op": "Write", - "value": 388177920 - } - ], - "io_serviced_recursive": [ - { - "major": 8, - "minor": 0, - "op": "Read", - "value": 25994442 - }, - { - "major": 8, - "minor": 0, - "op": "Write", - "value": 1734 - } - ], - "io_queue_recursive": [], - "io_service_time_recursive": [], - "io_wait_time_recursive": [], - "io_merged_recursive": [], - "io_time_recursive": [], - "sectors_recursive": [] - }, - "cpu_stats" : { - "cpu_usage" : { - "percpu_usage" : [ - 16970827, - 1839451, - 7107380, - 10571290 - ], - "usage_in_usermode" : 10000000, - "total_usage" : 36488948, - "usage_in_kernelmode" : 20000000 - }, - "system_cpu_usage" : 20091722000000000, - "online_cpus": 4 - }, - "precpu_stats" : { - "cpu_usage" : { - "percpu_usage" : [ - 16970827, - 1839451, - 7107380, - 10571290 - ], - "usage_in_usermode" : 10000000, - "total_usage" : 36488948, - "usage_in_kernelmode" : 20000000 - }, - "system_cpu_usage" : 20091722000000000, - "online_cpus": 4 - } - }` - // 1 second later, cache is 100 - jsonStats2 := `{ - "read" : "2015-01-08T22:57:32.547920715Z", - "networks" : { - "eth0":{ - "rx_dropped" : 0, - "rx_bytes" : 648, - "rx_errors" : 0, - "tx_packets" : 8, - "tx_dropped" : 0, - "rx_packets" : 8, - "tx_errors" : 0, - "tx_bytes" : 648 - } - }, - "memory_stats" : { - "stats" : { - "total_pgmajfault" : 0, - "cache" : 100, - "mapped_file" : 0, - "total_inactive_file" : 0, - "pgpgout" : 414, - "rss" : 6537216, - "total_mapped_file" : 0, - "writeback" : 0, - "unevictable" : 0, - "pgpgin" : 477, - "total_unevictable" : 0, - "pgmajfault" : 0, - "total_rss" : 6537216, - "total_rss_huge" : 6291456, - "total_writeback" : 0, - "total_inactive_anon" : 0, - "rss_huge" : 6291456, - "total_pgfault" : 964, - "total_active_file" : 0, - "active_anon" : 6537216, - "total_active_anon" : 6537216, - "total_pgpgout" : 414, - "total_cache" : 0, - "inactive_anon" : 0, - "active_file" : 0, - "pgfault" : 964, - "inactive_file" : 0, - "total_pgpgin" : 
477, - "swap" : 47312896, - "hierarchical_memsw_limit" : 1610612736 - }, - "max_usage" : 6651904, - "usage" : 6537216, - "failcnt" : 0, - "limit" : 67108864 - }, - "blkio_stats": { - "io_service_bytes_recursive": [ - { - "major": 8, - "minor": 0, - "op": "Read", - "value": 428795731968 - }, - { - "major": 8, - "minor": 0, - "op": "Write", - "value": 388177920 - } - ], - "io_serviced_recursive": [ - { - "major": 8, - "minor": 0, - "op": "Read", - "value": 25994442 - }, - { - "major": 8, - "minor": 0, - "op": "Write", - "value": 1734 - } - ], - "io_queue_recursive": [], - "io_service_time_recursive": [], - "io_wait_time_recursive": [], - "io_merged_recursive": [], - "io_time_recursive": [], - "sectors_recursive": [] - }, - "cpu_stats" : { - "cpu_usage" : { - "percpu_usage" : [ - 16970827, - 1839451, - 7107380, - 10571290 - ], - "usage_in_usermode" : 10000000, - "total_usage" : 36488948, - "usage_in_kernelmode" : 20000000 - }, - "system_cpu_usage" : 20091722000000000, - "online_cpus": 4 - }, - "precpu_stats" : { - "cpu_usage" : { - "percpu_usage" : [ - 16970827, - 1839451, - 7107380, - 10571290 - ], - "usage_in_usermode" : 10000000, - "total_usage" : 36488948, - "usage_in_kernelmode" : 20000000 - }, - "system_cpu_usage" : 20091722000000000, - "online_cpus": 4 - } - }` - var expected1 Stats - var expected2 Stats - err := json.Unmarshal([]byte(jsonStats1), &expected1) - if err != nil { - t.Fatal(err) - } - err = json.Unmarshal([]byte(jsonStats2), &expected2) - if err != nil { - t.Fatal(err) - } - id := "4fa6e0f0" - - var req http.Request - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte(jsonStats1)) - w.Write([]byte(jsonStats2)) - req = *r - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - errC := make(chan error, 1) - statsC := make(chan *Stats) - done := make(chan bool) - defer close(done) - go func() { - errC <- client.Stats(StatsOptions{ID: id, Stats: statsC, Stream: true, Done: done}) - close(errC) - }() - var resultStats []*Stats - for { - stats, ok := <-statsC - if !ok { - break - } - resultStats = append(resultStats, stats) - } - err = <-errC - if err != nil { - t.Fatal(err) - } - if len(resultStats) != 2 { - t.Fatalf("Stats: Expected 2 results. Got %d.", len(resultStats)) - } - if !reflect.DeepEqual(resultStats[0], &expected1) { - t.Errorf("Stats: Expected:\n%+v\nGot:\n%+v", expected1, resultStats[0]) - } - if !reflect.DeepEqual(resultStats[1], &expected2) { - t.Errorf("Stats: Expected:\n%+v\nGot:\n%+v", expected2, resultStats[1]) - } - if req.Method != "GET" { - t.Errorf("Stats: wrong HTTP method. Want GET. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/containers/" + id + "/stats")) - if req.URL.Path != u.Path { - t.Errorf("Stats: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path) - } -} - -func TestStatsContainerNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - statsC := make(chan *Stats) - done := make(chan bool) - defer close(done) - err := client.Stats(StatsOptions{ID: "abef348", Stats: statsC, Stream: true, Done: done}) - expected := &NoSuchContainer{ID: "abef348"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("Stats: Wrong error returned. Want %#v. 
Got %#v.", expected, err) - } -} - -func TestRenameContainer(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := RenameContainerOptions{ID: "something_old", Name: "something_new"} - err := client.RenameContainer(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("RenameContainer: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/something_old/rename?name=something_new")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("RenameContainer: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) - } - expectedValues := expectedURL.Query()["name"] - actualValues := req.URL.Query()["name"] - if len(actualValues) != 1 || expectedValues[0] != actualValues[0] { - t.Errorf("RenameContainer: Wrong params in request. Want %q. Got %q.", expectedValues, actualValues) - } -} - -// sleepyRoundTripper implements the http.RoundTripper interface. It sleeps -// for the 'sleep' duration and then returns an error for RoundTrip method. -type sleepyRoudTripper struct { - sleepDuration time.Duration -} - -func (rt *sleepyRoudTripper) RoundTrip(r *http.Request) (*http.Response, error) { - time.Sleep(rt.sleepDuration) - return nil, fmt.Errorf("Can't complete round trip") -} - -func TestInspectContainerWhenContextTimesOut(t *testing.T) { - t.Parallel() - rt := sleepyRoudTripper{sleepDuration: 200 * time.Millisecond} - - client := newTestClient(&rt) - - ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond) - defer cancel() - - _, err := client.InspectContainerWithContext("id", ctx) - if err != context.DeadlineExceeded { - t.Errorf("Expected 'DeadlineExceededError', got: %v", err) - } -} - -func TestStartContainerWhenContextTimesOut(t *testing.T) { - t.Parallel() - rt := sleepyRoudTripper{sleepDuration: 200 * time.Millisecond} - - client := newTestClient(&rt) - - ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond) - defer cancel() - - err := client.StartContainerWithContext("id", nil, ctx) - if err != context.DeadlineExceeded { - t.Errorf("Expected 'DeadlineExceededError', got: %v", err) - } -} - -func TestStopContainerWhenContextTimesOut(t *testing.T) { - t.Parallel() - rt := sleepyRoudTripper{sleepDuration: 300 * time.Millisecond} - - client := newTestClient(&rt) - - ctx, cancel := context.WithTimeout(context.TODO(), 50*time.Millisecond) - defer cancel() - - err := client.StopContainerWithContext("id", 10, ctx) - if err != context.DeadlineExceeded { - t.Errorf("Expected 'DeadlineExceededError', got: %v", err) - } -} - -func TestWaitContainerWhenContextTimesOut(t *testing.T) { - t.Parallel() - rt := sleepyRoudTripper{sleepDuration: 200 * time.Millisecond} - - client := newTestClient(&rt) - - ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond) - defer cancel() - - _, err := client.WaitContainerWithContext("id", ctx) - if err != context.DeadlineExceeded { - t.Errorf("Expected 'DeadlineExceededError', got: %v", err) - } -} - -func TestPruneContainers(t *testing.T) { - t.Parallel() - results := `{ - "ContainersDeleted": [ - "a", "b", "c" - ], - "SpaceReclaimed": 123 - }` - - expected := &PruneContainersResults{} - err := json.Unmarshal([]byte(results), expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: results, status: http.StatusOK}) - got, err := 
client.PruneContainers(PruneContainersOptions{}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, expected) { - t.Errorf("PruneContainers: Expected %#v. Got %#v.", expected, got) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_unix_test.go b/vendor/github.com/fsouza/go-dockerclient/container_unix_test.go deleted file mode 100644 index 5dab6025..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/container_unix_test.go +++ /dev/null @@ -1,108 +0,0 @@ -// +build !windows -// Copyright 2016 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bufio" - "bytes" - "io/ioutil" - "net" - "net/http" - "os" - "path/filepath" - "testing" - "time" -) - -func TestExportContainerViaUnixSocket(t *testing.T) { - t.Parallel() - content := "exported container tar content" - var buf []byte - out := bytes.NewBuffer(buf) - tempSocket := tempfile("export_socket") - defer os.Remove(tempSocket) - endpoint := "unix://" + tempSocket - u, _ := parseEndpoint(endpoint, false) - client := Client{ - HTTPClient: defaultClient(), - Dialer: &net.Dialer{}, - endpoint: endpoint, - endpointURL: u, - SkipServerVersionCheck: true, - } - listening := make(chan string) - done := make(chan int) - containerID := "4fa6e0f0c678" - go runStreamConnServer(t, "unix", tempSocket, listening, done, containerID) - <-listening // wait for server to start - opts := ExportContainerOptions{ID: containerID, OutputStream: out} - err := client.ExportContainer(opts) - <-done // make sure server stopped - if err != nil { - t.Errorf("ExportContainer: caught error %#v while exporting container, expected nil", err.Error()) - } - if out.String() != content { - t.Errorf("ExportContainer: wrong stdout. Want %#v. 
Got %#v.", content, out.String()) - } -} - -func TestStatsTimeoutUnixSocket(t *testing.T) { - t.Parallel() - tmpdir, err := ioutil.TempDir("", "socket") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - socketPath := filepath.Join(tmpdir, "docker_test.sock") - t.Logf("socketPath=%s", socketPath) - l, err := net.Listen("unix", socketPath) - if err != nil { - t.Fatal(err) - } - received := make(chan bool) - defer l.Close() - go func() { - conn, connErr := l.Accept() - if connErr != nil { - t.Logf("Failed to accept connection: %s", connErr) - return - } - breader := bufio.NewReader(conn) - req, connErr := http.ReadRequest(breader) - if connErr != nil { - t.Logf("Failed to read request: %s", connErr) - return - } - if req.URL.Path != "/containers/c/stats" { - t.Logf("Wrong URL path for stats: %q", req.URL.Path) - return - } - received <- true - time.Sleep(2 * time.Second) - }() - client, _ := NewClient("unix://" + socketPath) - client.SkipServerVersionCheck = true - errC := make(chan error, 1) - statsC := make(chan *Stats) - done := make(chan bool) - defer close(done) - go func() { - errC <- client.Stats(StatsOptions{ID: "c", Stats: statsC, Stream: true, Done: done, Timeout: time.Millisecond}) - close(errC) - }() - err = <-errC - e, ok := err.(net.Error) - if !ok || !e.Timeout() { - t.Errorf("Failed to receive timeout error, got %#v", err) - } - recvTimeout := 2 * time.Second - select { - case <-received: - return - case <-time.After(recvTimeout): - t.Fatalf("Timeout waiting to receive message after %v", recvTimeout) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/distribution_test.go b/vendor/github.com/fsouza/go-dockerclient/distribution_test.go deleted file mode 100644 index bb9a9ca0..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/distribution_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package docker - -import ( - "encoding/json" - "net/http" - "reflect" - "testing" - - "github.com/docker/docker/api/types/registry" -) - -func TestInspectDistribution(t *testing.T) { - t.Parallel() - jsonDistribution := `{ - "Descriptor": { - "MediaType": "application/vnd.docker.distribution.manifest.v2+json", - "Digest": "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96", - "Size": 3987495, - "URLs": [ - "" - ] - }, - "Platforms": [ - { - "Architecture": "amd64", - "OS": "linux", - "OSVersion": "", - "OSFeatures": [ - "" - ], - "Variant": "", - "Features": [ - "" - ] - } - ] -}` - - var expected registry.DistributionInspect - err := json.Unmarshal([]byte(jsonDistribution), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonDistribution, status: http.StatusOK} - client := newTestClient(fakeRT) - // image name/tag is not present in the reply, so it can be omitted for testing purposes - distributionInspect, err := client.InspectDistribution("") - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*distributionInspect, expected) { - t.Errorf("InspectDistribution(%q): Expected %#v. Got %#v.", "", expected, distributionInspect) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/env.go b/vendor/github.com/fsouza/go-dockerclient/env.go index 13fedfb1..0f2e72f1 100644 --- a/vendor/github.com/fsouza/go-dockerclient/env.go +++ b/vendor/github.com/fsouza/go-dockerclient/env.go @@ -156,7 +156,7 @@ func (env *Env) SetAuto(key string, value interface{}) { // Map returns the map representation of the env. 
func (env *Env) Map() map[string]string { - if len(*env) == 0 { + if env == nil || len(*env) == 0 { return nil } m := make(map[string]string) diff --git a/vendor/github.com/fsouza/go-dockerclient/env_test.go b/vendor/github.com/fsouza/go-dockerclient/env_test.go deleted file mode 100644 index 5f844d0f..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/env_test.go +++ /dev/null @@ -1,371 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -package docker - -import ( - "bytes" - "errors" - "reflect" - "sort" - "testing" -) - -func TestGet(t *testing.T) { - t.Parallel() - var tests = []struct { - input []string - query string - expected string - }{ - {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PATH", "/usr/bin:/bin"}, - {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATH", "/usr/local"}, - {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATHI", ""}, - {[]string{"WAT="}, "WAT", ""}, - } - for _, tt := range tests { - env := Env(tt.input) - got := env.Get(tt.query) - if got != tt.expected { - t.Errorf("Env.Get(%q): wrong result. Want %q. Got %q", tt.query, tt.expected, got) - } - } -} - -func TestExists(t *testing.T) { - t.Parallel() - var tests = []struct { - input []string - query string - expected bool - }{ - {[]string{"WAT=", "PYTHONPATH=/usr/local"}, "WAT", true}, - {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATH", true}, - {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATHI", false}, - } - for _, tt := range tests { - env := Env(tt.input) - got := env.Exists(tt.query) - if got != tt.expected { - t.Errorf("Env.Exists(%q): wrong result. Want %v. Got %v", tt.query, tt.expected, got) - } - } -} - -func TestGetBool(t *testing.T) { - t.Parallel() - var tests = []struct { - input string - expected bool - }{ - {"EMTPY_VAR", false}, {"ZERO_VAR", false}, {"NO_VAR", false}, - {"FALSE_VAR", false}, {"NONE_VAR", false}, {"TRUE_VAR", true}, - {"WAT", true}, {"PATH", true}, {"ONE_VAR", true}, {"NO_VAR_TAB", false}, - } - env := Env([]string{ - "EMPTY_VAR=", "ZERO_VAR=0", "NO_VAR=no", "FALSE_VAR=false", - "NONE_VAR=none", "TRUE_VAR=true", "WAT=wat", "PATH=/usr/bin:/bin", - "ONE_VAR=1", "NO_VAR_TAB=0 \t\t\t", - }) - for _, tt := range tests { - got := env.GetBool(tt.input) - if got != tt.expected { - t.Errorf("Env.GetBool(%q): wrong result. Want %v. Got %v.", tt.input, tt.expected, got) - } - } -} - -func TestSetBool(t *testing.T) { - t.Parallel() - var tests = []struct { - input bool - expected string - }{ - {true, "1"}, {false, "0"}, - } - for _, tt := range tests { - var env Env - env.SetBool("SOME", tt.input) - if got := env.Get("SOME"); got != tt.expected { - t.Errorf("Env.SetBool(%v): wrong result. Want %q. Got %q", tt.input, tt.expected, got) - } - } -} - -func TestGetInt(t *testing.T) { - t.Parallel() - var tests = []struct { - input string - expected int - }{ - {"NEGATIVE_INTEGER", -10}, {"NON_INTEGER", -1}, {"ONE", 1}, {"TWO", 2}, - } - env := Env([]string{"NEGATIVE_INTEGER=-10", "NON_INTEGER=wat", "ONE=1", "TWO=2"}) - for _, tt := range tests { - got := env.GetInt(tt.input) - if got != tt.expected { - t.Errorf("Env.GetInt(%q): wrong result. Want %d. 
Got %d", tt.input, tt.expected, got) - } - } -} - -func TestSetInt(t *testing.T) { - t.Parallel() - var tests = []struct { - input int - expected string - }{ - {10, "10"}, {13, "13"}, {7, "7"}, {33, "33"}, - {0, "0"}, {-34, "-34"}, - } - for _, tt := range tests { - var env Env - env.SetInt("SOME", tt.input) - if got := env.Get("SOME"); got != tt.expected { - t.Errorf("Env.SetBool(%d): wrong result. Want %q. Got %q", tt.input, tt.expected, got) - } - } -} - -func TestGetInt64(t *testing.T) { - t.Parallel() - var tests = []struct { - input string - expected int64 - }{ - {"NEGATIVE_INTEGER", -10}, {"NON_INTEGER", -1}, {"ONE", 1}, {"TWO", 2}, - } - env := Env([]string{"NEGATIVE_INTEGER=-10", "NON_INTEGER=wat", "ONE=1", "TWO=2"}) - for _, tt := range tests { - got := env.GetInt64(tt.input) - if got != tt.expected { - t.Errorf("Env.GetInt64(%q): wrong result. Want %d. Got %d", tt.input, tt.expected, got) - } - } -} - -func TestSetInt64(t *testing.T) { - t.Parallel() - var tests = []struct { - input int64 - expected string - }{ - {10, "10"}, {13, "13"}, {7, "7"}, {33, "33"}, - {0, "0"}, {-34, "-34"}, - } - for _, tt := range tests { - var env Env - env.SetInt64("SOME", tt.input) - if got := env.Get("SOME"); got != tt.expected { - t.Errorf("Env.SetBool(%d): wrong result. Want %q. Got %q", tt.input, tt.expected, got) - } - } -} - -func TestGetJSON(t *testing.T) { - t.Parallel() - var p struct { - Name string `json:"name"` - Age int `json:"age"` - } - var env Env - env.Set("person", `{"name":"Gopher","age":5}`) - err := env.GetJSON("person", &p) - if err != nil { - t.Error(err) - } - if p.Name != "Gopher" { - t.Errorf("Env.GetJSON(%q): wrong name. Want %q. Got %q", "person", "Gopher", p.Name) - } - if p.Age != 5 { - t.Errorf("Env.GetJSON(%q): wrong age. Want %d. Got %d", "person", 5, p.Age) - } -} - -func TestGetJSONAbsent(t *testing.T) { - t.Parallel() - var l []string - var env Env - err := env.GetJSON("person", &l) - if err != nil { - t.Error(err) - } - if l != nil { - t.Errorf("Env.GetJSON(): get unexpected list %v", l) - } -} - -func TestGetJSONFailure(t *testing.T) { - t.Parallel() - var p []string - var env Env - env.Set("list-person", `{"name":"Gopher","age":5}`) - err := env.GetJSON("list-person", &p) - if err == nil { - t.Errorf("Env.GetJSON(%q): got unexpected error.", "list-person") - } -} - -func TestSetJSON(t *testing.T) { - t.Parallel() - var p1 = struct { - Name string `json:"name"` - Age int `json:"age"` - }{Name: "Gopher", Age: 5} - var env Env - err := env.SetJSON("person", p1) - if err != nil { - t.Error(err) - } - var p2 struct { - Name string `json:"name"` - Age int `json:"age"` - } - err = env.GetJSON("person", &p2) - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(p1, p2) { - t.Errorf("Env.SetJSON(%q): wrong result. Want %v. 
Got %v", "person", p1, p2) - } -} - -func TestSetJSONFailure(t *testing.T) { - t.Parallel() - var env Env - err := env.SetJSON("person", unmarshable{}) - if err == nil { - t.Error("Env.SetJSON(): got unexpected error") - } - if env.Exists("person") { - t.Errorf("Env.SetJSON(): should not define the key %q, but did", "person") - } -} - -func TestGetList(t *testing.T) { - t.Parallel() - var tests = []struct { - input string - expected []string - }{ - {"WAT=wat", []string{"wat"}}, - {`WAT=["wat","wet","wit","wot","wut"]`, []string{"wat", "wet", "wit", "wot", "wut"}}, - {"WAT=", nil}, - } - for _, tt := range tests { - env := Env([]string{tt.input}) - got := env.GetList("WAT") - if !reflect.DeepEqual(got, tt.expected) { - t.Errorf("Env.GetList(%q): wrong result. Want %v. Got %v", "WAT", tt.expected, got) - } - } -} - -func TestSetList(t *testing.T) { - t.Parallel() - list := []string{"a", "b", "c"} - var env Env - if err := env.SetList("SOME", list); err != nil { - t.Error(err) - } - if got := env.GetList("SOME"); !reflect.DeepEqual(got, list) { - t.Errorf("Env.SetList(%v): wrong result. Got %v", list, got) - } -} - -func TestSet(t *testing.T) { - t.Parallel() - var env Env - env.Set("PATH", "/home/bin:/bin") - env.Set("SOMETHING", "/usr/bin") - env.Set("PATH", "/bin") - if expected, got := "/usr/bin", env.Get("SOMETHING"); got != expected { - t.Errorf("Env.Set(%q): wrong result. Want %q. Got %q", expected, expected, got) - } - if expected, got := "/bin", env.Get("PATH"); got != expected { - t.Errorf("Env.Set(%q): wrong result. Want %q. Got %q", expected, expected, got) - } -} - -func TestDecode(t *testing.T) { - t.Parallel() - var tests = []struct { - input string - expectedOut []string - expectedErr string - }{ - { - `{"PATH":"/usr/bin:/bin","containers":54,"wat":["123","345"]}`, - []string{"PATH=/usr/bin:/bin", "containers=54", `wat=["123","345"]`}, - "", - }, - {"}}", nil, "invalid character '}' looking for beginning of value"}, - {`{}`, nil, ""}, - } - for _, tt := range tests { - var env Env - err := env.Decode(bytes.NewBufferString(tt.input)) - if tt.expectedErr == "" { - if err != nil { - t.Error(err) - } - } else if tt.expectedErr != err.Error() { - t.Errorf("Env.Decode(): invalid error. Want %q. Got %q.", tt.expectedErr, err) - } - got := []string(env) - sort.Strings(got) - sort.Strings(tt.expectedOut) - if !reflect.DeepEqual(got, tt.expectedOut) { - t.Errorf("Env.Decode(): wrong result. Want %v. Got %v.", tt.expectedOut, got) - } - } -} - -func TestSetAuto(t *testing.T) { - t.Parallel() - buf := bytes.NewBufferString("oi") - var tests = []struct { - input interface{} - expected string - }{ - {10, "10"}, - {10.3, "10"}, - {"oi", "oi"}, - {buf, "{}"}, - {unmarshable{}, "{}"}, - } - for _, tt := range tests { - var env Env - env.SetAuto("SOME", tt.input) - if got := env.Get("SOME"); got != tt.expected { - t.Errorf("Env.SetAuto(%v): wrong result. Want %q. Got %q", tt.input, tt.expected, got) - } - } -} - -func TestMap(t *testing.T) { - t.Parallel() - var tests = []struct { - input []string - expected map[string]string - }{ - {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, map[string]string{"PATH": "/usr/bin:/bin", "PYTHONPATH": "/usr/local"}}, - {[]string{"ENABLE_LOGGING", "PYTHONPATH=/usr/local"}, map[string]string{"ENABLE_LOGGING": "", "PYTHONPATH": "/usr/local"}}, - {nil, nil}, - } - for _, tt := range tests { - env := Env(tt.input) - got := env.Map() - if !reflect.DeepEqual(got, tt.expected) { - t.Errorf("Env.Map(): wrong result. Want %v. 
Got %v", tt.expected, got) - } - } -} - -type unmarshable struct { -} - -func (unmarshable) MarshalJSON() ([]byte, error) { - return nil, errors.New("cannot marshal") -} diff --git a/vendor/github.com/fsouza/go-dockerclient/event_test.go b/vendor/github.com/fsouza/go-dockerclient/event_test.go deleted file mode 100644 index ffe05ee7..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/event_test.go +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bufio" - "crypto/tls" - "crypto/x509" - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" - - "github.com/google/go-cmp/cmp" -) - -func TestEventListeners(t *testing.T) { - t.Parallel() - testEventListeners("TestEventListeners", t, httptest.NewServer, NewClient) -} - -func TestTLSEventListeners(t *testing.T) { - t.Parallel() - testEventListeners("TestTLSEventListeners", t, func(handler http.Handler) *httptest.Server { - server := httptest.NewUnstartedServer(handler) - - cert, err := tls.LoadX509KeyPair("testing/data/server.pem", "testing/data/serverkey.pem") - if err != nil { - t.Fatalf("Error loading server key pair: %s", err) - } - - caCert, err := ioutil.ReadFile("testing/data/ca.pem") - if err != nil { - t.Fatalf("Error loading ca certificate: %s", err) - } - caPool := x509.NewCertPool() - if !caPool.AppendCertsFromPEM(caCert) { - t.Fatalf("Could not add ca certificate") - } - - server.TLS = &tls.Config{ - Certificates: []tls.Certificate{cert}, - RootCAs: caPool, - } - server.StartTLS() - return server - }, func(url string) (*Client, error) { - return NewTLSClient(url, "testing/data/cert.pem", "testing/data/key.pem", "testing/data/ca.pem") - }) -} - -func testEventListeners(testName string, t *testing.T, buildServer func(http.Handler) *httptest.Server, buildClient func(string) (*Client, error)) { - response := `{"action":"pull","type":"image","actor":{"id":"busybox:latest","attributes":{}},"time":1442421700,"timeNano":1442421700598988358} -{"action":"create","type":"container","actor":{"id":"5745704abe9caa5","attributes":{"image":"busybox"}},"time":1442421716,"timeNano":1442421716853979870} -{"action":"attach","type":"container","actor":{"id":"5745704abe9caa5","attributes":{"image":"busybox"}},"time":1442421716,"timeNano":1442421716894759198} -{"action":"start","type":"container","actor":{"id":"5745704abe9caa5","attributes":{"image":"busybox"}},"time":1442421716,"timeNano":1442421716983607193} -{"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924} -{"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924} -{"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966} -{"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970} -{"Action":"create","Actor":{"Attributes":{"HAProxyMode":"http","HealthCheck":"HttpGet","HealthCheckArgs":"http://127.0.0.1:39051/status/check","ServicePort_8080":"17801","image":"datanerd.us/siteeng/sample-app-go:latest","name":"sample-app-client-go-69818c1223ddb5"},"ID":"a925eaf4084d5c3bcf337b2abb05f566ebb94276dff34f6effb00d8ecd380e16"},"Type":"container","from":"datanerd.us/siteeng/sample-app-go:latest","id":"a925eaf4084d5c3bcf337b2abb05f566ebb94276dff34f6effb00d8ecd380e16","status":"create","time":1459133932,"timeNano":1459133932961735842}` - - server := buildServer(http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { - rsc := bufio.NewScanner(strings.NewReader(response)) - for rsc.Scan() { - w.Write(rsc.Bytes()) - w.(http.Flusher).Flush() - time.Sleep(10 * time.Millisecond) - } - })) - defer server.Close() - - wantedEvents := []APIEvents{ - { - Action: "pull", - Type: "image", - Actor: APIActor{ - ID: "busybox:latest", - Attributes: map[string]string{}, - }, - - Status: "pull", - ID: "busybox:latest", - - Time: 1442421700, - TimeNano: 1442421700598988358, - }, - { - Action: "create", - Type: "container", - Actor: APIActor{ - ID: "5745704abe9caa5", - Attributes: map[string]string{ - "image": "busybox", - }, - }, - - Status: "create", - ID: "5745704abe9caa5", - From: "busybox", - - Time: 1442421716, - TimeNano: 1442421716853979870, - }, - { - Action: "attach", - Type: "container", - Actor: APIActor{ - ID: "5745704abe9caa5", - Attributes: map[string]string{ - "image": "busybox", - }, - }, - - Status: "attach", - ID: "5745704abe9caa5", - From: "busybox", - - Time: 1442421716, - TimeNano: 1442421716894759198, - }, - { - Action: "start", - Type: "container", - Actor: APIActor{ - ID: "5745704abe9caa5", - Attributes: map[string]string{ - "image": "busybox", - }, - }, - - Status: "start", - ID: "5745704abe9caa5", - From: "busybox", - - Time: 1442421716, - TimeNano: 1442421716983607193, - }, - - { - Action: "create", - Type: "container", - Actor: APIActor{ - ID: "dfdf82bd3881", - Attributes: map[string]string{ - "image": "base:latest", - }, - }, - - Status: "create", - ID: "dfdf82bd3881", - From: "base:latest", - - Time: 1374067924, - }, - { - Action: "start", - Type: "container", - Actor: APIActor{ - ID: "dfdf82bd3881", - Attributes: map[string]string{ - "image": "base:latest", - }, - }, - - Status: "start", - ID: "dfdf82bd3881", - From: "base:latest", - - Time: 1374067924, - }, - { - Action: "stop", - Type: "container", - Actor: APIActor{ - ID: "dfdf82bd3881", - Attributes: map[string]string{ - "image": "base:latest", - }, - }, - - Status: "stop", - ID: "dfdf82bd3881", - From: "base:latest", - - Time: 1374067966, - }, - { - Action: "destroy", - Type: "container", - Actor: APIActor{ - ID: "dfdf82bd3881", - Attributes: map[string]string{ - "image": "base:latest", - }, - }, - - Status: "destroy", - ID: "dfdf82bd3881", - From: "base:latest", - - Time: 1374067970, - }, - { - Action: "create", - Type: "container", - Status: "create", - From: "datanerd.us/siteeng/sample-app-go:latest", - ID: "a925eaf4084d5c3bcf337b2abb05f566ebb94276dff34f6effb00d8ecd380e16", - Time: 1459133932, - TimeNano: 1459133932961735842, - Actor: APIActor{ - ID: "a925eaf4084d5c3bcf337b2abb05f566ebb94276dff34f6effb00d8ecd380e16", - Attributes: map[string]string{ - "HAProxyMode": "http", - "HealthCheck": "HttpGet", - "HealthCheckArgs": "http://127.0.0.1:39051/status/check", - "ServicePort_8080": "17801", - "image": "datanerd.us/siteeng/sample-app-go:latest", - "name": "sample-app-client-go-69818c1223ddb5", - }, - }, - }, - } - - client, err := buildClient(server.URL) - if err != nil { - t.Errorf("Failed to create client: %s", err) - } - client.SkipServerVersionCheck = true - - listener := make(chan *APIEvents, len(wantedEvents)+1) - defer func() { - if err = client.RemoveEventListener(listener); err != nil { - t.Error(err) - } - }() - - err = client.AddEventListener(listener) - if err != nil { - t.Errorf("Failed to add event listener: %s", err) - } - - timeout := time.After(5 * time.Second) - events := make([]APIEvents, 0, len(wantedEvents)) - -loop: - for i := range wantedEvents { - select { - case msg, ok := <-listener: - if 
!ok { - break loop - } - events = append(events, *msg) - case <-timeout: - t.Fatalf("%s: timed out waiting on events after %d events", testName, i) - } - } - cmpr := cmp.Comparer(func(e1, e2 APIEvents) bool { - return e1.Action == e2.Action && e1.Actor.ID == e2.Actor.ID - }) - if dff := cmp.Diff(events, wantedEvents, cmpr); dff != "" { - t.Errorf("wrong events:\n%s", dff) - } -} - -func TestEventListenerReAdding(t *testing.T) { - t.Parallel() - endChan := make(chan bool) - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - <-endChan - })) - - client, err := NewClient(server.URL) - if err != nil { - t.Errorf("Failed to create client: %s", err) - } - - listener := make(chan *APIEvents, 10) - if err := client.AddEventListener(listener); err != nil { - t.Errorf("Failed to add event listener: %s", err) - } - - // Make sure eventHijack() is started with the current eventMonitoringState. - time.Sleep(10 * time.Millisecond) - - if err := client.RemoveEventListener(listener); err != nil { - t.Errorf("Failed to remove event listener: %s", err) - } - - if err := client.AddEventListener(listener); err != nil { - t.Errorf("Failed to add event listener: %s", err) - } - - endChan <- true - - // Give the goroutine of the first eventHijack() time to handle the EOF. - time.Sleep(10 * time.Millisecond) -} diff --git a/vendor/github.com/fsouza/go-dockerclient/example_test.go b/vendor/github.com/fsouza/go-dockerclient/example_test.go deleted file mode 100644 index 0d2a7891..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/example_test.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2013 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker_test - -import ( - "archive/tar" - "bytes" - "fmt" - "log" - "time" - - "github.com/fsouza/go-dockerclient" -) - -func ExampleClient_AttachToContainer() { - client, err := docker.NewClient("http://localhost:4243") - if err != nil { - log.Fatal(err) - } - client.SkipServerVersionCheck = true - // Reading logs from container a84849 and sending them to buf. 
- var buf bytes.Buffer - err = client.AttachToContainer(docker.AttachToContainerOptions{ - Container: "a84849", - OutputStream: &buf, - Logs: true, - Stdout: true, - Stderr: true, - }) - if err != nil { - log.Fatal(err) - } - log.Println(buf.String()) - buf.Reset() - err = client.AttachToContainer(docker.AttachToContainerOptions{ - Container: "a84849", - OutputStream: &buf, - Stdout: true, - Stream: true, - }) - if err != nil { - log.Fatal(err) - } - log.Println(buf.String()) -} - -func ExampleClient_BuildImage() { - client, err := docker.NewClient("http://localhost:4243") - if err != nil { - log.Fatal(err) - } - - t := time.Now() - inputbuf, outputbuf := bytes.NewBuffer(nil), bytes.NewBuffer(nil) - tr := tar.NewWriter(inputbuf) - tr.WriteHeader(&tar.Header{Name: "Dockerfile", Size: 10, ModTime: t, AccessTime: t, ChangeTime: t}) - tr.Write([]byte("FROM base\n")) - tr.Close() - opts := docker.BuildImageOptions{ - Name: "test", - InputStream: inputbuf, - OutputStream: outputbuf, - } - if err := client.BuildImage(opts); err != nil { - log.Fatal(err) - } -} - -func ExampleClient_AddEventListener() { - client, err := docker.NewClient("http://localhost:4243") - if err != nil { - log.Fatal(err) - } - - listener := make(chan *docker.APIEvents) - err = client.AddEventListener(listener) - if err != nil { - log.Fatal(err) - } - - defer func() { - - err = client.RemoveEventListener(listener) - if err != nil { - log.Fatal(err) - } - - }() - - timeout := time.After(1 * time.Second) - - for { - select { - case msg := <-listener: - log.Println(msg) - case <-timeout: - return - } - } - -} - -func ExampleEnv_Map() { - e := docker.Env([]string{"A=1", "B=2", "C=3"}) - envs := e.Map() - for k, v := range envs { - fmt.Printf("%s=%q\n", k, v) - } -} - -func ExampleEnv_SetJSON() { - type Person struct { - Name string - Age int - } - p := Person{Name: "Gopher", Age: 4} - var e docker.Env - err := e.SetJSON("person", p) - if err != nil { - log.Fatal(err) - } -} - -func ExampleEnv_GetJSON() { - type Person struct { - Name string - Age int - } - p := Person{Name: "Gopher", Age: 4} - var e docker.Env - e.Set("person", `{"name":"Gopher","age":4}`) - err := e.GetJSON("person", &p) - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/exec_test.go b/vendor/github.com/fsouza/go-dockerclient/exec_test.go deleted file mode 100644 index ba24ddd7..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/exec_test.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package docker - -import ( - "bytes" - "encoding/json" - "net" - "net/http" - "net/http/httptest" - "net/url" - "reflect" - "strings" - "testing" -) - -func TestExecCreate(t *testing.T) { - t.Parallel() - jsonContainer := `{"Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"}` - var expected struct{ ID string } - err := json.Unmarshal([]byte(jsonContainer), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - client := newTestClient(fakeRT) - config := CreateExecOptions{ - Container: "test", - AttachStdin: true, - AttachStdout: true, - AttachStderr: false, - Tty: false, - Cmd: []string{"touch", "/tmp/file"}, - User: "a-user", - } - execObj, err := client.CreateExec(config) - if err != nil { - t.Fatal(err) - } - expectedID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - if execObj.ID != expectedID { - t.Errorf("ExecCreate: wrong ID. Want %q. Got %q.", expectedID, execObj.ID) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("ExecCreate: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/test/exec")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("ExecCreate: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) - } - var gotBody struct{ ID string } - err = json.NewDecoder(req.Body).Decode(&gotBody) - if err != nil { - t.Fatal(err) - } -} - -func TestExecCreateWithEnvErr(t *testing.T) { - t.Parallel() - jsonContainer := `{"Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"}` - var expected struct{ ID string } - err := json.Unmarshal([]byte(jsonContainer), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - client := newTestClient(fakeRT) - config := CreateExecOptions{ - Container: "test", - AttachStdin: true, - AttachStdout: true, - AttachStderr: false, - Tty: false, - Env: []string{"foo=bar"}, - Cmd: []string{"touch", "/tmp/file"}, - User: "a-user", - } - _, err = client.CreateExec(config) - if err == nil || err.Error() != "exec configuration Env is only supported in API#1.25 and above" { - t.Error("CreateExec: options contain Env for unsupported api version") - } -} - -func TestExecCreateWithEnv(t *testing.T) { - t.Parallel() - jsonContainer := `{"Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"}` - var expected struct{ ID string } - err := json.Unmarshal([]byte(jsonContainer), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - endpoint := "http://localhost:4243" - u, _ := parseEndpoint("http://localhost:4243", false) - testAPIVersion, _ := NewAPIVersion("1.25") - client := Client{ - HTTPClient: &http.Client{Transport: fakeRT}, - Dialer: &net.Dialer{}, - endpoint: endpoint, - endpointURL: u, - SkipServerVersionCheck: true, - serverAPIVersion: testAPIVersion, - } - config := CreateExecOptions{ - Container: "test", - AttachStdin: true, - AttachStdout: true, - AttachStderr: false, - Tty: false, - Env: []string{"foo=bar"}, - Cmd: []string{"touch", "/tmp/file"}, - User: "a-user", - } - _, err = client.CreateExec(config) - if err != nil { - t.Error(err) - } -} - -func TestExecCreateWithWorkingDirErr(t *testing.T) { - t.Parallel() - jsonContainer := `{"Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"}` - var expected struct{ ID string } 
- err := json.Unmarshal([]byte(jsonContainer), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - client := newTestClient(fakeRT) - config := CreateExecOptions{ - Container: "test", - AttachStdin: true, - AttachStdout: true, - AttachStderr: false, - Tty: false, - WorkingDir: "/tmp", - Cmd: []string{"touch", "file"}, - User: "a-user", - } - _, err = client.CreateExec(config) - if err == nil || err.Error() != "exec configuration WorkingDir is only supported in API#1.35 and above" { - t.Error("CreateExec: options contain WorkingDir for unsupported api version") - } -} - -func TestExecCreateWithWorkingDir(t *testing.T) { - t.Parallel() - jsonContainer := `{"Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"}` - var expected struct{ ID string } - err := json.Unmarshal([]byte(jsonContainer), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - endpoint := "http://localhost:4243" - u, _ := parseEndpoint("http://localhost:4243", false) - testAPIVersion, _ := NewAPIVersion("1.35") - client := Client{ - HTTPClient: &http.Client{Transport: fakeRT}, - Dialer: &net.Dialer{}, - endpoint: endpoint, - endpointURL: u, - SkipServerVersionCheck: true, - serverAPIVersion: testAPIVersion, - } - config := CreateExecOptions{ - Container: "test", - AttachStdin: true, - AttachStdout: true, - AttachStderr: false, - Tty: false, - WorkingDir: "/tmp", - Cmd: []string{"touch", "file"}, - User: "a-user", - } - _, err = client.CreateExec(config) - if err != nil { - t.Error(err) - } -} - -func TestExecStartDetached(t *testing.T) { - t.Parallel() - execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - fakeRT := &FakeRoundTripper{status: http.StatusOK} - client := newTestClient(fakeRT) - config := StartExecOptions{ - Detach: true, - } - err := client.StartExec(execID, config) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("ExecStart: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/exec/" + execID + "/start")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("ExecCreate: Wrong path in request. Want %q. 
Got %q.", expectedURL.Path, gotPath) - } - t.Log(req.Body) - var gotBody struct{ Detach bool } - err = json.NewDecoder(req.Body).Decode(&gotBody) - if err != nil { - t.Fatal(err) - } - if !gotBody.Detach { - t.Fatal("Expected Detach in StartExecOptions to be true") - } -} - -func TestExecStartAndAttach(t *testing.T) { - var reader = strings.NewReader("send value") - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) - w.Write([]byte("hello")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var stdout, stderr bytes.Buffer - success := make(chan struct{}) - execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - opts := StartExecOptions{ - OutputStream: &stdout, - ErrorStream: &stderr, - InputStream: reader, - RawTerminal: true, - Success: success, - } - go func() { - if err := client.StartExec(execID, opts); err != nil { - t.Error(err) - } - }() - <-success -} - -func TestExecResize(t *testing.T) { - t.Parallel() - execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - fakeRT := &FakeRoundTripper{status: http.StatusOK} - client := newTestClient(fakeRT) - err := client.ResizeExecTTY(execID, 10, 20) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("ExecStart: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/exec/" + execID + "/resize?h=10&w=20")) - if gotPath := req.URL.RequestURI(); gotPath != expectedURL.RequestURI() { - t.Errorf("ExecCreate: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) - } -} - -func TestExecInspect(t *testing.T) { - t.Parallel() - jsonExec := `{ - "CanRemove": false, - "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126", - "DetachKeys": "", - "ExitCode": 2, - "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b", - "OpenStderr": true, - "OpenStdin": true, - "OpenStdout": true, - "ProcessConfig": { - "arguments": [ - "-c", - "exit 2" - ], - "entrypoint": "sh", - "privileged": false, - "tty": true, - "user": "1000" - }, - "Running": false - }` - var expected ExecInspect - err := json.Unmarshal([]byte(jsonExec), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonExec, status: http.StatusOK} - client := newTestClient(fakeRT) - expectedID := "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" - execObj, err := client.InspectExec(expectedID) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*execObj, expected) { - t.Errorf("ExecInspect: Expected %#v. Got %#v.", expected, *execObj) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("ExecInspect: wrong HTTP method. Want %q. Got %q.", "GET", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/exec/" + expectedID + "/json")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - t.Errorf("ExecInspect: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/image_test.go b/vendor/github.com/fsouza/go-dockerclient/image_test.go deleted file mode 100644 index 1a05a2e1..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/image_test.go +++ /dev/null @@ -1,1178 +0,0 @@ -// Copyright 2013 go-dockerclient authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "reflect" - "strings" - "testing" - "time" -) - -func newTestClient(rt http.RoundTripper) Client { - endpoint := "http://localhost:4243" - u, _ := parseEndpoint("http://localhost:4243", false) - testAPIVersion, _ := NewAPIVersion("1.17") - client := Client{ - HTTPClient: &http.Client{Transport: rt}, - Dialer: &net.Dialer{}, - endpoint: endpoint, - endpointURL: u, - SkipServerVersionCheck: true, - serverAPIVersion: testAPIVersion, - } - return client -} - -type stdoutMock struct { - *bytes.Buffer -} - -func (m stdoutMock) Close() error { - return nil -} - -type stdinMock struct { - *bytes.Buffer -} - -func (m stdinMock) Close() error { - return nil -} - -func TestListImages(t *testing.T) { - t.Parallel() - body := `[ - { - "Repository":"base", - "Tag":"ubuntu-12.10", - "Id":"b750fe79269d", - "Created":1364102658 - }, - { - "Repository":"base", - "Tag":"ubuntu-quantal", - "Id":"b750fe79269d", - "Created":1364102658 - }, - { - "RepoTag": [ - "ubuntu:12.04", - "ubuntu:precise", - "ubuntu:latest" - ], - "Id": "8dbd9e392a964c", - "Created": 1365714795, - "Size": 131506275, - "VirtualSize": 131506275 - }, - { - "RepoTag": [ - "ubuntu:12.10", - "ubuntu:quantal" - ], - "ParentId": "27cf784147099545", - "Id": "b750fe79269d2e", - "Created": 1364102658, - "Size": 24653, - "VirtualSize": 180116135 - } -]` - var expected []APIImages - err := json.Unmarshal([]byte(body), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) - images, err := client.ListImages(ListImagesOptions{}) - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(images, expected) { - t.Errorf("ListImages: Wrong return value. Want %#v. Got %#v.", expected, images) - } -} - -func TestListImagesParameters(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "null", status: http.StatusOK} - client := newTestClient(fakeRT) - _, err := client.ListImages(ListImagesOptions{All: false}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("ListImages({All: false}: Wrong HTTP method. Want GET. Got %s.", req.Method) - } - if all := req.URL.Query().Get("all"); all != "0" && all != "" { - t.Errorf("ListImages({All: false}): Wrong parameter. Want all=0 or not present at all. Got all=%s", all) - } - fakeRT.Reset() - _, err = client.ListImages(ListImagesOptions{All: true}) - if err != nil { - t.Fatal(err) - } - req = fakeRT.requests[0] - if all := req.URL.Query().Get("all"); all != "1" { - t.Errorf("ListImages({All: true}): Wrong parameter. Want all=1. Got all=%s", all) - } - fakeRT.Reset() - _, err = client.ListImages(ListImagesOptions{Filters: map[string][]string{ - "dangling": {"true"}, - }}) - if err != nil { - t.Fatal(err) - } - req = fakeRT.requests[0] - body := req.URL.Query().Get("filters") - var filters map[string][]string - err = json.Unmarshal([]byte(body), &filters) - if err != nil { - t.Fatal(err) - } - if len(filters["dangling"]) != 1 || filters["dangling"][0] != "true" { - t.Errorf("ListImages(dangling=[true]): Wrong filter map. 
Want dangling=[true], got dangling=%v", filters["dangling"]) - } -} - -func TestImageHistory(t *testing.T) { - t.Parallel() - body := `[ - { - "Id": "25daec02219d2d852f7526137213a9b199926b4b24e732eab5b8bc6c49bd470e", - "Tags": [ - "debian:7.6", - "debian:latest", - "debian:7", - "debian:wheezy" - ], - "Created": 1409856216, - "CreatedBy": "/bin/sh -c #(nop) CMD [/bin/bash]" - }, - { - "Id": "41026a5347fb5be6ed16115bf22df8569697139f246186de9ae8d4f67c335dce", - "Created": 1409856213, - "CreatedBy": "/bin/sh -c #(nop) ADD file:1ee9e97209d00e3416a4543b23574cc7259684741a46bbcbc755909b8a053a38 in /", - "Size": 85178663 - }, - { - "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", - "Tags": [ - "scratch:latest" - ], - "Created": 1371157430 - } -]` - var expected []ImageHistory - err := json.Unmarshal([]byte(body), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) - history, err := client.ImageHistory("debian:latest") - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(history, expected) { - t.Errorf("ImageHistory: Wrong return value. Want %#v. Got %#v.", expected, history) - } -} - -func TestRemoveImage(t *testing.T) { - t.Parallel() - name := "test" - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - err := client.RemoveImage(name) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedMethod := "DELETE" - if req.Method != expectedMethod { - t.Errorf("RemoveImage(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/images/" + name)) - if req.URL.Path != u.Path { - t.Errorf("RemoveImage(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path) - } -} - -func TestRemoveImageNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such image", status: http.StatusNotFound}) - err := client.RemoveImage("test:") - if err != ErrNoSuchImage { - t.Errorf("RemoveImage: wrong error. Want %#v. Got %#v.", ErrNoSuchImage, err) - } -} - -func TestRemoveImageExtended(t *testing.T) { - t.Parallel() - name := "test" - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - err := client.RemoveImageExtended(name, RemoveImageOptions{Force: true, NoPrune: true}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedMethod := "DELETE" - if req.Method != expectedMethod { - t.Errorf("RemoveImage(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/images/" + name)) - if req.URL.Path != u.Path { - t.Errorf("RemoveImage(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path) - } - expectedQuery := "force=1&noprune=1" - if query := req.URL.Query().Encode(); query != expectedQuery { - t.Errorf("PushImage: Wrong query string. Want %q. 
Got %q.", expectedQuery, query) - } -} - -func TestInspectImage(t *testing.T) { - t.Parallel() - body := `{ - "Id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "Parent":"27cf784147099545", - "Created":"2013-03-23T22:24:18.818426Z", - "Container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", - "ContainerConfig":{"Memory":1}, - "VirtualSize":12345, - "RootFS": { - "Type": "layers", - "Layers": [ - "sha256:05a0deb2e405eb3095ab646dc1695a26bffe8bd4071e3af90efcf16e9d3f6d93", - "sha256:4c5db681b9aa9ab1cf666ec969a810c8ff4410e70e06394670dc4f3bf595532f" - ] - } -}` - - created, err := time.Parse(time.RFC3339Nano, "2013-03-23T22:24:18.818426Z") - if err != nil { - t.Fatal(err) - } - - expected := Image{ - ID: "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - Parent: "27cf784147099545", - Created: created, - Container: "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", - ContainerConfig: Config{ - Memory: 1, - }, - VirtualSize: 12345, - RootFS: &RootFS{ - Type: "layers", - Layers: []string{ - "sha256:05a0deb2e405eb3095ab646dc1695a26bffe8bd4071e3af90efcf16e9d3f6d93", - "sha256:4c5db681b9aa9ab1cf666ec969a810c8ff4410e70e06394670dc4f3bf595532f", - }, - }, - } - fakeRT := &FakeRoundTripper{message: body, status: http.StatusOK} - client := newTestClient(fakeRT) - image, err := client.InspectImage(expected.ID) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*image, expected) { - t.Errorf("InspectImage(%q): Wrong image returned. Want %#v. Got %#v.", expected.ID, expected, *image) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("InspectImage(%q): Wrong HTTP method. Want GET. Got %s.", expected.ID, req.Method) - } - u, _ := url.Parse(client.getURL("/images/" + expected.ID + "/json")) - if req.URL.Path != u.Path { - t.Errorf("InspectImage(%q): Wrong request URL. Want %q. Got %q.", expected.ID, u.Path, req.URL.Path) - } -} - -func TestInspectImageNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such image", status: http.StatusNotFound}) - name := "test" - image, err := client.InspectImage(name) - if image != nil { - t.Errorf("InspectImage(%q): expected image, got %#v.", name, image) - } - if err != ErrNoSuchImage { - t.Errorf("InspectImage(%q): wrong error. Want %#v. Got %#v.", name, ErrNoSuchImage, err) - } -} - -func TestPushImage(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - err := client.PushImage(PushImageOptions{Name: "test", OutputStream: &buf}, AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - expected := "Pushing 1/100" - if buf.String() != expected { - t.Errorf("PushImage: Wrong output. Want %q. Got %q.", expected, buf.String()) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("PushImage: Wrong HTTP method. Want POST. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/images/test/push")) - if req.URL.Path != u.Path { - t.Errorf("PushImage: Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) - } - if query := req.URL.Query().Encode(); query != "" { - t.Errorf("PushImage: Wrong query string. Want no parameters, got %q.", query) - } - - auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth")) - if err != nil { - t.Errorf("PushImage: caught error decoding auth. 
%#v", err.Error()) - } - if strings.TrimSpace(string(auth)) != "{}" { - t.Errorf("PushImage: wrong body. Want %q. Got %q.", - base64.URLEncoding.EncodeToString([]byte("{}")), req.Header.Get("X-Registry-Auth")) - } -} - -func TestPushImageWithRawJSON(t *testing.T) { - t.Parallel() - body := ` - {"status":"Pushing..."} - {"status":"Pushing", "progress":"1/? (n/a)", "progressDetail":{"current":1}}} - {"status":"Image successfully pushed"} - ` - fakeRT := &FakeRoundTripper{ - message: body, - status: http.StatusOK, - header: map[string]string{ - "Content-Type": "application/json", - }, - } - client := newTestClient(fakeRT) - var buf bytes.Buffer - - err := client.PushImage(PushImageOptions{ - Name: "test", - OutputStream: &buf, - RawJSONStream: true, - }, AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - if buf.String() != body { - t.Errorf("PushImage: Wrong raw output. Want %q. Got %q.", body, buf.String()) - } -} - -func TestPushImageWithAuthentication(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - inputAuth := AuthConfiguration{ - Username: "gopher", - Password: "gopher123", - Email: "gopher@tsuru.io", - } - err := client.PushImage(PushImageOptions{Name: "test", OutputStream: &buf}, inputAuth) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - var gotAuth AuthConfiguration - - auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth")) - if err != nil { - t.Errorf("PushImage: caught error decoding auth. %#v", err.Error()) - } - - err = json.Unmarshal(auth, &gotAuth) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(gotAuth, inputAuth) { - t.Errorf("PushImage: wrong auth configuration. Want %#v. Got %#v.", inputAuth, gotAuth) - } -} - -func TestPushImageCustomRegistry(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - var authConfig AuthConfiguration - var buf bytes.Buffer - opts := PushImageOptions{ - Name: "test", Registry: "docker.tsuru.io", - OutputStream: &buf, - } - err := client.PushImage(opts, authConfig) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedQuery := "registry=docker.tsuru.io" - if query := req.URL.Query().Encode(); query != expectedQuery { - t.Errorf("PushImage: Wrong query string. Want %q. Got %q.", expectedQuery, query) - } -} - -func TestPushImageNoName(t *testing.T) { - t.Parallel() - client := Client{} - err := client.PushImage(PushImageOptions{}, AuthConfiguration{}) - if err != ErrNoSuchImage { - t.Errorf("PushImage: got wrong error. Want %#v. Got %#v.", ErrNoSuchImage, err) - } -} - -func TestPullImage(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - err := client.PullImage(PullImageOptions{Repository: "base", OutputStream: &buf}, - AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - expected := "Pulling 1/100" - if buf.String() != expected { - t.Errorf("PullImage: Wrong output. Want %q. Got %q.", expected, buf.String()) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("PullImage: Wrong HTTP method. Want POST. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/images/create")) - if req.URL.Path != u.Path { - t.Errorf("PullImage: Wrong request path. Want %q. 
Got %q.", u.Path, req.URL.Path) - } - expectedQuery := "fromImage=base" - if query := req.URL.Query().Encode(); query != expectedQuery { - t.Errorf("PullImage: Wrong query strin. Want %q. Got %q.", expectedQuery, query) - } -} - -func TestPullImageWithDigest(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - err := client.PullImage(PullImageOptions{ - Repository: "tsuru/bs:latest@sha256:504a2f04aa5d07768e4f7467ddd2618b07dd6013cfabca7dc527a3d9fa786580", - OutputStream: &buf, - }, AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - expected := "Pulling 1/100" - if buf.String() != expected { - t.Errorf("PullImage: Wrong output. Want %q. Got %q.", expected, buf.String()) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("PullImage: Wrong HTTP method. Want POST. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/images/create")) - if req.URL.Path != u.Path { - t.Errorf("PullImage: Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) - } - expectedQuery := url.Values{ - "fromImage": {"tsuru/bs:latest"}, - "tag": {"sha256:504a2f04aa5d07768e4f7467ddd2618b07dd6013cfabca7dc527a3d9fa786580"}, - } - if !reflect.DeepEqual(req.URL.Query(), expectedQuery) { - t.Errorf("PullImage: Wrong query string\nWant %#v\nGot %#v", expectedQuery, req.URL.Query()) - } -} - -func TestPullImageWithDigestAndTag(t *testing.T) { - // This is probably a wrong use of the Docker API, but let's let users - // send the request to the API. And also changing this behavior would - // be a breaking change on go-dockerclient. - t.Parallel() - fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - err := client.PullImage(PullImageOptions{ - Repository: "tsuru/bs:latest@sha256:504a2f04aa5d07768e4f7467ddd2618b07dd6013cfabca7dc527a3d9fa786580", - Tag: "latest", - OutputStream: &buf, - }, AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - expected := "Pulling 1/100" - if buf.String() != expected { - t.Errorf("PullImage: Wrong output. Want %q. Got %q.", expected, buf.String()) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("PullImage: Wrong HTTP method. Want POST. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/images/create")) - if req.URL.Path != u.Path { - t.Errorf("PullImage: Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) - } - expectedQuery := url.Values{ - "fromImage": {"tsuru/bs:latest@sha256:504a2f04aa5d07768e4f7467ddd2618b07dd6013cfabca7dc527a3d9fa786580"}, - "tag": {"latest"}, - } - if !reflect.DeepEqual(req.URL.Query(), expectedQuery) { - t.Errorf("PullImage: Wrong query string\nWant %#vGot %#v", expectedQuery, req.URL.Query()) - } -} - -func TestPullImageWithRawJSON(t *testing.T) { - t.Parallel() - body := ` - {"status":"Pulling..."} - {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}} - ` - fakeRT := &FakeRoundTripper{ - message: body, - status: http.StatusOK, - header: map[string]string{ - "Content-Type": "application/json", - }, - } - client := newTestClient(fakeRT) - var buf bytes.Buffer - err := client.PullImage(PullImageOptions{ - Repository: "base", - OutputStream: &buf, - RawJSONStream: true, - }, AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - if buf.String() != body { - t.Errorf("PullImage: Wrong raw output. Want %q. 
Got %q", body, buf.String()) - } -} - -func TestPullImageWithoutOutputStream(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := PullImageOptions{ - Repository: "base", - Registry: "docker.tsuru.io", - } - err := client.PullImage(opts, AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestPullImageCustomRegistry(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := PullImageOptions{ - Repository: "base", - Registry: "docker.tsuru.io", - OutputStream: &buf, - } - err := client.PullImage(opts, AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestPullImageTag(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := PullImageOptions{ - Repository: "base", - Registry: "docker.tsuru.io", - Tag: "latest", - OutputStream: &buf, - } - err := client.PullImage(opts, AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}, "tag": {"latest"}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestPullImageNoRepository(t *testing.T) { - t.Parallel() - var opts PullImageOptions - client := Client{} - err := client.PullImage(opts, AuthConfiguration{}) - if err != ErrNoSuchImage { - t.Errorf("PullImage: got wrong error. Want %#v. Got %#v.", ErrNoSuchImage, err) - } -} - -func TestImportImageFromUrl(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := ImportImageOptions{ - Source: "http://mycompany.com/file.tar", - Repository: "testimage", - Tag: "tag", - OutputStream: &buf, - } - err := client.ImportImage(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}, "tag": {opts.Tag}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("ImportImage: wrong query string. Want %#v. 
Got %#v.", expected, got) - } -} - -func TestImportImageFromInput(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - in := bytes.NewBufferString("tar content") - var buf bytes.Buffer - opts := ImportImageOptions{ - Source: "-", Repository: "testimage", - InputStream: in, OutputStream: &buf, - Tag: "tag", - } - err := client.ImportImage(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}, "tag": {opts.Tag}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got) - } - body, err := ioutil.ReadAll(req.Body) - if err != nil { - t.Errorf("ImportImage: caugth error while reading body %#v", err.Error()) - } - e := "tar content" - if string(body) != e { - t.Errorf("ImportImage: wrong body. Want %#v. Got %#v.", e, string(body)) - } -} - -func TestImportImageDoesNotPassInputIfSourceIsNotDash(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - in := bytes.NewBufferString("foo") - opts := ImportImageOptions{ - Source: "http://test.com/container.tar", Repository: "testimage", - InputStream: in, OutputStream: &buf, - } - err := client.ImportImage(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got) - } - body, err := ioutil.ReadAll(req.Body) - if err != nil { - t.Errorf("ImportImage: caugth error while reading body %#v", err.Error()) - } - if string(body) != "" { - t.Errorf("ImportImage: wrong body. Want nothing. Got %#v.", string(body)) - } -} - -func TestImportImageShouldPassTarContentToBodyWhenSourceIsFilePath(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - tarPath := "testing/data/container.tar" - opts := ImportImageOptions{ - Source: tarPath, Repository: "testimage", - OutputStream: &buf, - } - err := client.ImportImage(opts) - if err != nil { - t.Fatal(err) - } - tar, err := os.Open(tarPath) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - tarContent, err := ioutil.ReadAll(tar) - body, err := ioutil.ReadAll(req.Body) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(tarContent, body) { - t.Errorf("ImportImage: wrong body. Want %#v content. Got %#v.", tarPath, body) - } -} - -func TestImportImageShouldChangeSourceToDashWhenItsAFilePath(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - tarPath := "testing/data/container.tar" - opts := ImportImageOptions{ - Source: tarPath, Repository: "testimage", - OutputStream: &buf, - } - err := client.ImportImage(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromSrc": {"-"}, "repo": {opts.Repository}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("ImportImage: wrong query string. Want %#v. 
Got %#v.", expected, got) - } -} - -func TestBuildImageParameters(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - NoCache: true, - CacheFrom: []string{"test1", "test2"}, - SuppressOutput: true, - Pull: true, - RmTmpContainer: true, - ForceRmTmpContainer: true, - Memory: 1024, - Memswap: 2048, - CPUShares: 10, - CPUQuota: 7500, - CPUPeriod: 100000, - CPUSetCPUs: "0-3", - Ulimits: []ULimit{{Name: "nofile", Soft: 100, Hard: 200}}, - BuildArgs: []BuildArg{{Name: "SOME_VAR", Value: "some_value"}}, - InputStream: &buf, - OutputStream: &buf, - Labels: map[string]string{"k": "v"}, - NetworkMode: "host", - CgroupParent: "cgparent", - SecurityOpt: []string{"securityoptions"}, - } - err := client.BuildImage(opts) - if err != nil && !strings.Contains(err.Error(), "build image fail") { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{ - "t": {opts.Name}, - "nocache": {"1"}, - "q": {"1"}, - "pull": {"1"}, - "rm": {"1"}, - "forcerm": {"1"}, - "memory": {"1024"}, - "memswap": {"2048"}, - "cpushares": {"10"}, - "cpuquota": {"7500"}, - "cpuperiod": {"100000"}, - "cpusetcpus": {"0-3"}, - "labels": {`{"k":"v"}`}, - "ulimits": {`[{"Name":"nofile","Soft":100,"Hard":200}]`}, - "buildargs": {`{"SOME_VAR":"some_value"}`}, - "networkmode": {"host"}, - "cgroupparent": {"cgparent"}, - "securityopt": {"securityoptions"}, - } - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("BuildImage: wrong query string. Want %#v.\n Got %#v.", expected, got) - } -} - -func TestBuildImageParametersForRemoteBuild(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - Remote: "testing/data/container.tar", - SuppressOutput: true, - OutputStream: &buf, - } - err := client.BuildImage(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"t": {opts.Name}, "remote": {opts.Remote}, "q": {"1"}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestBuildImageMissingRepoAndNilInput(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - SuppressOutput: true, - OutputStream: &buf, - } - err := client.BuildImage(opts) - if err != ErrMissingRepo { - t.Errorf("BuildImage: wrong error returned. Want %#v. Got %#v.", ErrMissingRepo, err) - } -} - -func TestBuildImageMissingOutputStream(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := BuildImageOptions{Name: "testImage"} - err := client.BuildImage(opts) - if err != ErrMissingOutputStream { - t.Errorf("BuildImage: wrong error returned. Want %#v. 
Got %#v.", ErrMissingOutputStream, err) - } -} - -func TestBuildImageWithRawJSON(t *testing.T) { - t.Parallel() - body := ` - {"stream":"Step 0 : FROM ubuntu:latest\n"} - {"stream":" ---\u003e 4300eb9d3c8d\n"} - {"stream":"Step 1 : MAINTAINER docker \n"} - {"stream":" ---\u003e Using cache\n"} - {"stream":" ---\u003e 3a3ed758c370\n"} - {"stream":"Step 2 : CMD /usr/bin/top\n"} - {"stream":" ---\u003e Running in 36b1479cc2e4\n"} - {"stream":" ---\u003e 4b6188aebe39\n"} - {"stream":"Removing intermediate container 36b1479cc2e4\n"} - {"stream":"Successfully built 4b6188aebe39\n"} - ` - fakeRT := &FakeRoundTripper{ - message: body, - status: http.StatusOK, - header: map[string]string{ - "Content-Type": "application/json", - }, - } - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - RmTmpContainer: true, - InputStream: &buf, - OutputStream: &buf, - RawJSONStream: true, - } - err := client.BuildImage(opts) - if err != nil { - t.Fatal(err) - } - if buf.String() != body { - t.Errorf("BuildImage: Wrong raw output. Want %q. Got %q.", body, buf.String()) - } -} - -func TestBuildImageRemoteWithoutName(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Remote: "testing/data/container.tar", - SuppressOutput: true, - OutputStream: &buf, - } - err := client.BuildImage(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"t": {opts.Remote}, "remote": {opts.Remote}, "q": {"1"}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestTagImageParameters(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := TagImageOptions{Repo: "testImage"} - err := client.TagImage("base", opts) - if err != nil && !strings.Contains(err.Error(), "tag image fail") { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := "http://localhost:4243/images/base/tag?repo=testImage" - got := req.URL.String() - if !reflect.DeepEqual(got, expected) { - t.Errorf("TagImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestTagImageMissingRepo(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := TagImageOptions{Repo: "testImage"} - err := client.TagImage("", opts) - if err != ErrNoSuchImage { - t.Errorf("TestTag: wrong error returned. Want %#v. Got %#v.", - ErrNoSuchImage, err) - } -} - -func TestIsUrl(t *testing.T) { - t.Parallel() - url := "http://foo.bar/" - result := isURL(url) - if !result { - t.Errorf("isURL: wrong match. Expected %#v to be a url. Got %#v.", url, result) - } - url = "/foo/bar.tar" - result = isURL(url) - if result { - t.Errorf("isURL: wrong match. Expected %#v to not be a url. 
Got %#v", url, result) - } -} - -func TestLoadImage(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - tar, err := os.Open("testing/data/container.tar") - if err != nil { - t.Fatal(err) - } else { - defer tar.Close() - } - opts := LoadImageOptions{InputStream: tar} - err = client.LoadImage(opts) - if nil != err { - t.Error(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("LoadImage: wrong method. Expected %q. Got %q.", "POST", req.Method) - } - if req.URL.Path != "/images/load" { - t.Errorf("LoadImage: wrong URL. Expected %q. Got %q.", "/images/load", req.URL.Path) - } -} - -func TestExportImage(t *testing.T) { - t.Parallel() - var buf bytes.Buffer - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := ExportImageOptions{Name: "testimage", OutputStream: &buf} - err := client.ExportImage(opts) - if nil != err { - t.Error(err) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("ExportImage: wrong method. Expected %q. Got %q.", "GET", req.Method) - } - expectedPath := "/images/testimage/get" - if req.URL.Path != expectedPath { - t.Errorf("ExportIMage: wrong path. Expected %q. Got %q.", expectedPath, req.URL.Path) - } -} - -func TestExportImages(t *testing.T) { - t.Parallel() - var buf bytes.Buffer - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := ExportImagesOptions{Names: []string{"testimage1", "testimage2:latest"}, OutputStream: &buf} - err := client.ExportImages(opts) - if nil != err { - t.Error(err) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("ExportImage: wrong method. Expected %q. Got %q.", "GET", req.Method) - } - expected := "http://localhost:4243/images/get?names=testimage1&names=testimage2%3Alatest" - got := req.URL.String() - if !reflect.DeepEqual(got, expected) { - t.Errorf("ExportIMage: wrong path. Expected %q. Got %q.", expected, got) - } -} - -func TestExportImagesNoNames(t *testing.T) { - t.Parallel() - var buf bytes.Buffer - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := ExportImagesOptions{Names: []string{}, OutputStream: &buf} - err := client.ExportImages(opts) - if err == nil { - t.Error("Expected an error") - } - if err != ErrMustSpecifyNames { - t.Error(err) - } -} - -func TestSearchImages(t *testing.T) { - t.Parallel() - body := `[ - { - "description":"A container with Cassandra 2.0.3", - "is_official":true, - "is_automated":true, - "name":"poklet/cassandra", - "star_count":17 - }, - { - "description":"A container with Cassandra 2.0.3", - "is_official":true, - "is_automated":false, - "name":"poklet/cassandra", - "star_count":17 - } - , - { - "description":"A container with Cassandra 2.0.3", - "is_official":false, - "is_automated":true, - "name":"poklet/cassandra", - "star_count":17 - } -]` - var expected []APIImageSearch - err := json.Unmarshal([]byte(body), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) - result, err := client.SearchImages("cassandra") - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(result, expected) { - t.Errorf("SearchImages: Wrong return value. Want %#v. 
Got %#v.", expected, result) - } -} - -func TestSearchImagesEx(t *testing.T) { - t.Parallel() - body := `[ - { - "description":"A container with Cassandra 2.0.3", - "is_official":true, - "is_automated":true, - "name":"poklet/cassandra", - "star_count":17 - }, - { - "description":"A container with Cassandra 2.0.3", - "is_official":true, - "is_automated":false, - "name":"poklet/cassandra", - "star_count":17 - } - , - { - "description":"A container with Cassandra 2.0.3", - "is_official":false, - "is_automated":true, - "name":"poklet/cassandra", - "star_count":17 - } -]` - var expected []APIImageSearch - err := json.Unmarshal([]byte(body), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) - auth := AuthConfiguration{} - result, err := client.SearchImagesEx("cassandra", auth) - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(result, expected) { - t.Errorf("SearchImages: Wrong return value. Want %#v. Got %#v.", expected, result) - } -} - -func TestPruneImages(t *testing.T) { - t.Parallel() - results := `{ - "ImagesDeleted": [ - {"Deleted": "a"}, - {"Deleted": "b"}, - {"Deleted": "c"} - ], - "SpaceReclaimed": 123 - }` - - expected := &PruneImagesResults{} - err := json.Unmarshal([]byte(results), expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: results, status: http.StatusOK}) - got, err := client.PruneImages(PruneImagesOptions{}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, expected) { - t.Errorf("PruneImages: Expected %#v. Got %#v.", expected, got) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/integration_test.go b/vendor/github.com/fsouza/go-dockerclient/integration_test.go deleted file mode 100644 index eda7d527..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/integration_test.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build docker_integration - -package docker - -import ( - "bytes" - "os" - "testing" -) - -var dockerEndpoint string - -func init() { - dockerEndpoint = os.Getenv("DOCKER_HOST") - if dockerEndpoint == "" { - dockerEndpoint = "unix:///var/run/docker.sock" - } -} - -func TestIntegrationPullCreateStartLogs(t *testing.T) { - imageName := pullImage(t) - client := getClient() - hostConfig := HostConfig{PublishAllPorts: true} - createOpts := CreateContainerOptions{ - Config: &Config{ - Image: imageName, - Cmd: []string{"cat", "/home/gopher/file.txt"}, - User: "gopher", - }, - HostConfig: &hostConfig, - } - container, err := client.CreateContainer(createOpts) - if err != nil { - t.Fatal(err) - } - err = client.StartContainer(container.ID, &hostConfig) - if err != nil { - t.Fatal(err) - } - status, err := client.WaitContainer(container.ID) - if err != nil { - t.Error(err) - } - if status != 0 { - t.Errorf("WaitContainer(%q): wrong status. Want 0. 
Got %d", container.ID, status) - } - var stdout, stderr bytes.Buffer - logsOpts := LogsOptions{ - Container: container.ID, - OutputStream: &stdout, - ErrorStream: &stderr, - Stdout: true, - Stderr: true, - } - err = client.Logs(logsOpts) - if err != nil { - t.Error(err) - } - if stderr.String() != "" { - t.Errorf("Got unexpected stderr from logs: %q", stderr.String()) - } - expected := `Welcome to reality, wake up and rejoice -Welcome to reality, you've made the right choice -Welcome to reality, and let them hear your voice, shout it out! -` - if stdout.String() != expected { - t.Errorf("Got wrong stdout from logs.\nWant:\n%#v.\n\nGot:\n%#v.", expected, stdout.String()) - } -} - -func pullImage(t *testing.T) string { - imageName := "fsouza/go-dockerclient-integration:latest" - var buf bytes.Buffer - pullOpts := PullImageOptions{ - Repository: imageName, - OutputStream: &buf, - } - client := getClient() - err := client.PullImage(pullOpts, AuthConfiguration{}) - if err != nil { - t.Logf("Pull output: %s", buf.String()) - t.Fatal(err) - } - return imageName -} - -func getClient() *Client { - client, _ := NewClient(dockerEndpoint) - return client -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go new file mode 100644 index 00000000..7d7cf496 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go @@ -0,0 +1,505 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package archive + +import ( + "archive/tar" + "bufio" + "compress/gzip" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Bzip2 is bzip2 compression algorithm. + Bzip2 + // Gzip is gzip compression algorithm. + Gzip + // Xz is xz compression algorithm. + Xz +) + +const ( + modeISDIR = 040000 // Directory + modeISFIFO = 010000 // FIFO + modeISREG = 0100000 // Regular file + modeISLNK = 0120000 // Symbolic link + modeISBLK = 060000 // Block special file + modeISCHR = 020000 // Character special file + modeISSOCK = 0140000 // Socket +) + +// Compression is the state represents if compressed or not. +type Compression int + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +// WhiteoutFormat is the format of whiteouts unpacked +type WhiteoutFormat int + +// TarOptions wraps the tar options. +type TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ChownOpts *idtools.Identity + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. 
+ WhiteoutFormat WhiteoutFormat + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + InUserNS bool +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := newTarAppender( + idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + compressWriter, + options.ChownOpts, + ) + ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat) + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Errorf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Errorf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Errorf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. + if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. 
IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + skip, err = pm.Matches(relFilePath) + if err != nil { + logrus.Errorf("Error matching %s: %v", relFilePath, err) + return err + } + } + + if skip { + // If we want to skip this file and its a directory + // then we should first check to see if there's an + // excludes pattern (e.g. !dir/file) that starts with this + // dir. If so then we can't skip this dir. + + // Its not a dir then so we can just return/skip. + if !f.IsDir() { + return nil + } + + // No exceptions (!...) in patterns so just skip dir + if !pm.Exclusions() { + return filepath.SkipDir + } + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range pm.Patterns() { + if !pat.Exclusion() { + continue + } + if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + // if pipe is broken, stop writing tar stream to it + if err == io.ErrClosedPipe { + return err + } + } + return nil + }) + } + }() + + return pipeReader, nil +} + +// CompressStream compresses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +type tarWhiteoutConverter interface { + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertRead(*tar.Header, string) (bool, error) +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + IdentityMapping *idtools.IdentityMapping + ChownOpts *idtools.Identity + + // For packing and unpacking whiteout files in the + // non standard format. The whiteout files defined + // by the AUFS standard are used as the tar whiteout + // standard. 
+ WhiteoutConverter tarWhiteoutConverter +} + +func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { + return &tarAppender{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IdentityMapping: idMapping, + ChownOpts: chownOpts, + } +} + +// addTarFile adds to the tar archive a file from `path` as `name` +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return err + } + } + + hdr, err := FileInfoHeader(name, fi, link) + if err != nil { + return err + } + if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { + return err + } + + // if it's not a directory and has more than 1 link, + // it's hard linked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + inode, err := getInodeFromStat(fi.Sys()) + if err != nil { + return err + } + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + //check whether the file is overlayfs whiteout + //if yes, skip re-mapping container ID mappings. + isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 + + //handle re-mapping container ID mappings back to host ID mappings before + //writing tar headers/files. We skip whiteout files because they were written + //by the kernel and already have proper ownership relative to the host + if !isOverlayWhiteout && + !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && + !ta.IdentityMapping.Empty() { + fileIdentity, err := getFileIdentity(fi.Sys()) + if err != nil { + return err + } + hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIdentity) + if err != nil { + return err + } + } + + // explicitly override with ChownOpts + if ta.ChownOpts != nil { + hdr.Uid = ta.ChownOpts.UID + hdr.Gid = ta.ChownOpts.GID + } + + if ta.WhiteoutConverter != nil { + wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) + if err != nil { + return err + } + + // If a new whiteout file exists, write original hdr, then + // replace hdr with wo to be written after. Whiteouts should + // always be written after the original. Note the original + // hdr may have been updated to be a whiteout with returning + // a whiteout header + if wo != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + return fmt.Errorf("tar: cannot use whiteout for non-empty file") + } + hdr = wo + } + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + // We use system.OpenSequential to ensure we use sequential file + // access on Windows to avoid depleting the standby list. + // On Linux, this equates to a regular os.Open. 
+ file, err := system.OpenSequential(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem +// to a tar header +func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + return nil +} + +// FileInfoHeader creates a populated Header from fi. +// Compared to archive pkg this function fills in more information. +// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), +// which have been deleted since Go 1.9 archive/tar. +func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return nil, err + } + hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { + return nil, err + } + return hdr, nil +} + +// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar +// https://github.com/golang/go/commit/66b5a2f +func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { + fm := fi.Mode() + switch { + case fm.IsRegular(): + mode |= modeISREG + case fi.IsDir(): + mode |= modeISDIR + case fm&os.ModeSymlink != 0: + mode |= modeISLNK + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + mode |= modeISCHR + } else { + mode |= modeISBLK + } + case fm&os.ModeNamedPipe != 0: + mode |= modeISFIFO + case fm&os.ModeSocket != 0: + mode |= modeISSOCK + } + return mode +} + +// canonicalTarName provides a platform-independent and consistent posix-style +//path for files and directories to be archived regardless of the platform. +func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go similarity index 86% rename from vendor/github.com/docker/docker/pkg/archive/archive_linux.go rename to vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go index 970d4d06..9e1f3f2f 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go +++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go @@ -1,4 +1,8 @@ -package archive // import "github.com/docker/docker/pkg/archive" +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. 
+ +package archive import ( "archive/tar" @@ -10,6 +14,14 @@ import ( "golang.org/x/sys/unix" ) +const ( + // AUFSWhiteoutFormat is the default format for whiteouts + AUFSWhiteoutFormat WhiteoutFormat = iota + // OverlayWhiteoutFormat formats whiteout according to the overlay + // standard. + OverlayWhiteoutFormat +) + func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { if format == OverlayWhiteoutFormat { return overlayWhiteoutConverter{} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_other.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_other.go new file mode 100644 index 00000000..72822c85 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_other.go @@ -0,0 +1,11 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +// +build !linux + +package archive + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go similarity index 52% rename from vendor/github.com/docker/docker/pkg/archive/archive_unix.go rename to vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go index e81076c1..80199d51 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go +++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go @@ -1,6 +1,10 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + // +build !windows -package archive // import "github.com/docker/docker/pkg/archive" +package archive import ( "archive/tar" @@ -10,11 +14,16 @@ import ( "syscall" "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" "golang.org/x/sys/unix" ) +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + // fixVolumePathPrefix does platform specific processing to ensure that if // the path being passed in is not in a volume path format, convert it to one. func fixVolumePathPrefix(srcPath string) string { @@ -29,15 +38,24 @@ func getWalkRoot(srcPath string, include string) string { return srcPath + string(filepath.Separator) + include } -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - return p, nil // already unix-style +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + inode = uint64(s.Ino) + } + + return } -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. 
+func getFileIdentity(stat interface{}) (idtools.Identity, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t") + } + return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil +} func chmodTarEntry(perm os.FileMode) os.FileMode { return perm // noop for unix as golang APIs provide perm bits correctly @@ -57,58 +75,3 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) ( return } - -func getInodeFromStat(stat interface{}) (inode uint64, err error) { - s, ok := stat.(*syscall.Stat_t) - - if ok { - inode = s.Ino - } - - return -} - -func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { - s, ok := stat.(*syscall.Stat_t) - - if !ok { - return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") - } - return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - if rsystem.RunningInUserNS() { - // cannot create a device if running in user namespace - return nil - } - - mode := uint32(hdr.Mode & 07777) - switch hdr.Typeflag { - case tar.TypeBlock: - mode |= unix.S_IFBLK - case tar.TypeChar: - mode |= unix.S_IFCHR - case tar.TypeFifo: - mode |= unix.S_IFIFO - } - - return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go similarity index 80% rename from vendor/github.com/docker/docker/pkg/archive/archive_windows.go rename to vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go index e75bfc9b..c47768e6 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go +++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go @@ -1,4 +1,8 @@ -package archive // import "github.com/docker/docker/pkg/archive" +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package archive import ( "archive/tar" @@ -11,18 +15,6 @@ import ( "github.com/docker/docker/pkg/longpath" ) -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return longpath.AddPrefix(srcPath) -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. -func getWalkRoot(srcPath string, include string) string { - return filepath.Join(srcPath, include) -} - // CanonicalTarNameForPath returns platform-specific filepath // to canonical posix-style path for tar archival. p is relative // path. 
@@ -38,6 +30,28 @@ func CanonicalTarNameForPath(p string) (string, error) { } +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + // do nothing. no notion of Inode in stat on Windows + return +} + +func getFileIdentity(stat interface{}) (idtools.Identity, error) { + // no notion of file ownership mapping yet on Windows + return idtools.Identity{}, nil +} + // chmodTarEntry is used to adjust the file permissions used in tar header based // on the platform the archival is done. func chmodTarEntry(perm os.FileMode) os.FileMode { @@ -55,23 +69,3 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) ( // do nothing. no notion of Rdev, Nlink in stat on Windows return } - -func getInodeFromStat(stat interface{}) (inode uint64, err error) { - // do nothing. no notion of Inode in stat on Windows - return -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - return nil -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - return nil -} - -func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { - // no notion of file ownership mapping yet on Windows - return idtools.IDPair{0, 0}, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_unix.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_unix.go new file mode 100644 index 00000000..39ea287b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_unix.go @@ -0,0 +1,16 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +// +build !windows + +package archive + +import ( + "os" + "syscall" +) + +func hasHardlinks(fi os.FileInfo) bool { + return fi.Sys().(*syscall.Stat_t).Nlink > 1 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_windows.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_windows.go new file mode 100644 index 00000000..a9313047 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_windows.go @@ -0,0 +1,11 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package archive + +import "os" + +func hasHardlinks(fi os.FileInfo) bool { + return false +} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/copy.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/copy.go new file mode 100644 index 00000000..45d45f20 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/copy.go @@ -0,0 +1,29 @@ +// Copyright 2014 Docker authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package archive + +import ( + "os" + "path/filepath" +) + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(filepath.FromSlash(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(os.PathSeparator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/whiteouts.go similarity index 85% rename from vendor/github.com/docker/docker/pkg/archive/whiteouts.go rename to vendor/github.com/fsouza/go-dockerclient/internal/archive/whiteouts.go index 4c072a87..a61c22a0 100644 --- a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go +++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/whiteouts.go @@ -1,4 +1,8 @@ -package archive // import "github.com/docker/docker/pkg/archive" +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package archive // Whiteouts are files with a special meaning for the layered filesystem. // Docker uses AUFS whiteout files inside exported archives. In other diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go similarity index 95% rename from vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go rename to vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go index b9f40d3e..71b3395c 100644 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ b/vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go @@ -1,4 +1,8 @@ -package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage" +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package jsonmessage import ( "encoding/json" @@ -8,9 +12,9 @@ import ( "strings" "time" - gotty "github.com/Nvveen/Gotty" - "github.com/docker/docker/pkg/term" - units "github.com/docker/go-units" + "github.com/Nvveen/Gotty" + "github.com/docker/go-units" + "github.com/fsouza/go-dockerclient/internal/term" ) // RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to @@ -245,7 +249,7 @@ func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { // DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` // describes if `out` is a terminal. If this is the case, it will print `\n` at the end of // each line and move the cursor while displaying. 
-func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error { +func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error { var ( dec = json.NewDecoder(in) ids = make(map[string]int) @@ -277,7 +281,7 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, if jm.Aux != nil { if auxCallback != nil { - auxCallback(jm.Aux) + auxCallback(jm) } continue } @@ -330,6 +334,6 @@ type stream interface { } // DisplayJSONMessagesToStream prints json messages to the output stream -func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(*json.RawMessage)) error { +func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(JSONMessage)) error { return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) } diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/term/term.go b/vendor/github.com/fsouza/go-dockerclient/internal/term/term.go new file mode 100644 index 00000000..af06911d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/internal/term/term.go @@ -0,0 +1,13 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package term + +// Winsize represents the size of the terminal window. +type Winsize struct { + Height uint16 + Width uint16 + x uint16 + y uint16 +} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go b/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go new file mode 100644 index 00000000..2a9964a0 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go @@ -0,0 +1,16 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +// +build !windows + +package term + +import "golang.org/x/sys/unix" + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) + ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel} + return ws, err +} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize_windows.go b/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize_windows.go new file mode 100644 index 00000000..4a07a5d1 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize_windows.go @@ -0,0 +1,22 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package term + +import "github.com/Azure/go-ansiterm/winterm" + +// GetWinsize returns the window size based on the specified file descriptor. 
+func GetWinsize(fd uintptr) (*Winsize, error) { + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil, err + } + + winsize := &Winsize{ + Width: uint16(info.Window.Right - info.Window.Left + 1), + Height: uint16(info.Window.Bottom - info.Window.Top + 1), + } + + return winsize, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/misc_test.go b/vendor/github.com/fsouza/go-dockerclient/misc_test.go deleted file mode 100644 index 0e27d99a..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/misc_test.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2013 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "net" - "net/http" - "net/url" - "reflect" - "testing" -) - -type DockerVersion struct { - Version string - GitCommit string - GoVersion string -} - -func TestVersion(t *testing.T) { - t.Parallel() - body := `{ - "Version":"0.2.2", - "GitCommit":"5a2a5cc+CHANGES", - "GoVersion":"go1.0.3" -}` - fakeRT := FakeRoundTripper{message: body, status: http.StatusOK} - client := newTestClient(&fakeRT) - expected := DockerVersion{ - Version: "0.2.2", - GitCommit: "5a2a5cc+CHANGES", - GoVersion: "go1.0.3", - } - version, err := client.Version() - if err != nil { - t.Fatal(err) - } - - if result := version.Get("Version"); result != expected.Version { - t.Errorf("Version(): Wrong result. Want %#v. Got %#v.", expected.Version, version.Get("Version")) - } - if result := version.Get("GitCommit"); result != expected.GitCommit { - t.Errorf("GitCommit(): Wrong result. Want %#v. Got %#v.", expected.GitCommit, version.Get("GitCommit")) - } - if result := version.Get("GoVersion"); result != expected.GoVersion { - t.Errorf("GoVersion(): Wrong result. Want %#v. Got %#v.", expected.GoVersion, version.Get("GoVersion")) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("Version(): wrong request method. Want GET. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/version")) - if req.URL.Path != u.Path { - t.Errorf("Version(): wrong request path. Want %q. 
Got %q.", u.Path, req.URL.Path) - } -} - -func TestVersionError(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "internal error", status: http.StatusInternalServerError} - client := newTestClient(fakeRT) - version, err := client.Version() - if version != nil { - t.Errorf("Version(): expected value, got %#v.", version) - } - if err == nil { - t.Error("Version(): unexpected error") - } -} - -func TestInfo(t *testing.T) { - t.Parallel() - body := `{ - "Containers":11, - "Images":16, - "Debug":false, - "NFd":11, - "NGoroutines":21, - "MemoryLimit":true, - "SwapLimit":false, - "RegistryConfig":{ - "InsecureRegistryCIDRs":["127.0.0.0/8"], - "IndexConfigs":{ - "docker.io":{ - "Name":"docker.io", - "Mirrors":null, - "Secure":true, - "Official":true - } - }, - "Mirrors":null - }, - "SecurityOptions": [ - "name=apparmor", - "name=seccomp", - "profile=default" - ] -}` - fakeRT := FakeRoundTripper{message: body, status: http.StatusOK} - client := newTestClient(&fakeRT) - expected := &DockerInfo{ - Containers: 11, - Images: 16, - Debug: false, - NFd: 11, - NGoroutines: 21, - MemoryLimit: true, - SwapLimit: false, - RegistryConfig: &ServiceConfig{ - InsecureRegistryCIDRs: []*NetIPNet{ - { - Mask: net.CIDRMask(8, 32), - IP: net.ParseIP("127.0.0.0").To4(), - }, - }, - IndexConfigs: map[string]*IndexInfo{ - "docker.io": { - Name: "docker.io", - Secure: true, - Official: true, - }, - }, - }, - SecurityOptions: []string{ - "name=apparmor", - "name=seccomp", - "profile=default", - }, - } - info, err := client.Info() - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(expected, info) { - t.Errorf("Info(): Wrong result.\nWant %#v.\nGot %#v.", expected, info) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("Info(): Wrong HTTP method. Want GET. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/info")) - if req.URL.Path != u.Path { - t.Errorf("Info(): Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) - } -} - -func TestInfoError(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "internal error", status: http.StatusInternalServerError} - client := newTestClient(fakeRT) - version, err := client.Info() - if version != nil { - t.Errorf("Info(): expected value, got %#v.", version) - } - if err == nil { - t.Error("Info(): unexpected error") - } -} - -func TestParseRepositoryTag(t *testing.T) { - t.Parallel() - var tests = []struct { - input string - expectedRepo string - expectedTag string - }{ - { - "localhost.localdomain:5000/samalba/hipache:latest", - "localhost.localdomain:5000/samalba/hipache", - "latest", - }, - { - "localhost.localdomain:5000/samalba/hipache", - "localhost.localdomain:5000/samalba/hipache", - "", - }, - { - "tsuru/python", - "tsuru/python", - "", - }, - { - "tsuru/python:2.7", - "tsuru/python", - "2.7", - }, - { - "busybox@sha256:4a731fb46adc5cefe3ae374a8b6020fc1b6ad667a279647766e9a3cd89f6fa92", - "busybox", - "", - }, - { - "localhost.localdomain:5000/samalba/hipache:v1@sha256:4a731fb46adc5cefe3ae374a8b6020fc1b6ad667a279647766e9a3cd89f6fa92", - "localhost.localdomain:5000/samalba/hipache", - "v1", - }, - } - for _, tt := range tests { - repo, tag := ParseRepositoryTag(tt.input) - if repo != tt.expectedRepo { - t.Errorf("ParseRepositoryTag(%q): wrong repository. Want %q. Got %q", tt.input, tt.expectedRepo, repo) - } - if tag != tt.expectedTag { - t.Errorf("ParseRepositoryTag(%q): wrong tag. Want %q. 
Got %q", tt.input, tt.expectedTag, tag) - } - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/network.go b/vendor/github.com/fsouza/go-dockerclient/network.go index c6ddb22c..8c03b9ae 100644 --- a/vendor/github.com/fsouza/go-dockerclient/network.go +++ b/vendor/github.com/fsouza/go-dockerclient/network.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "net/http" + "net/url" ) // ErrNetworkAlreadyExists is the error returned by CreateNetwork when the @@ -71,7 +72,9 @@ func (c *Client) FilteredListNetworks(opts NetworkFilterOpts) ([]Network, error) if err != nil { return nil, err } - path := "/networks?filters=" + string(params) + qs := make(url.Values) + qs.Add("filters", string(params)) + path := "/networks?" + qs.Encode() resp, err := c.do("GET", path, doOptions{}) if err != nil { return nil, err diff --git a/vendor/github.com/fsouza/go-dockerclient/network_test.go b/vendor/github.com/fsouza/go-dockerclient/network_test.go deleted file mode 100644 index dadc3170..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/network_test.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bytes" - "encoding/json" - "net/http" - "net/url" - "reflect" - "testing" -) - -func TestListNetworks(t *testing.T) { - t.Parallel() - jsonNetworks := `[ - { - "ID": "8dfafdbc3a40", - "Name": "blah", - "Type": "bridge", - "Endpoints":[{"ID": "918c11c8288a", "Name": "dsafdsaf", "Network": "8dfafdbc3a40"}] - }, - { - "ID": "9fb1e39c", - "Name": "foo", - "Type": "bridge", - "Endpoints":[{"ID": "c080be979dda", "Name": "lllll2222", "Network": "9fb1e39c"}] - } -]` - var expected []Network - err := json.Unmarshal([]byte(jsonNetworks), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: jsonNetworks, status: http.StatusOK}) - containers, err := client.ListNetworks() - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(containers, expected) { - t.Errorf("ListNetworks: Expected %#v. Got %#v.", expected, containers) - } -} - -func TestFilteredListNetworks(t *testing.T) { - t.Parallel() - jsonNetworks := `[ - { - "ID": "9fb1e39c", - "Name": "foo", - "Type": "bridge", - "Endpoints":[{"ID": "c080be979dda", "Name": "lllll2222", "Network": "9fb1e39c"}] - } -]` - var expected []Network - err := json.Unmarshal([]byte(jsonNetworks), &expected) - if err != nil { - t.Fatal(err) - } - wantQuery := "filters={\"name\":{\"blah\":true}}" - fakeRT := &FakeRoundTripper{message: jsonNetworks, status: http.StatusOK} - client := newTestClient(fakeRT) - opts := NetworkFilterOpts{ - "name": map[string]bool{"blah": true}, - } - containers, err := client.FilteredListNetworks(opts) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(containers, expected) { - t.Errorf("ListNetworks: Expected %#v. 
Got %#v.", expected, containers) - } - query := fakeRT.requests[0].URL.RawQuery - if query != wantQuery { - t.Errorf("FilteredListNetworks: wrong query\nWant %q\nGot %q", wantQuery, query) - } -} - -func TestNetworkInfo(t *testing.T) { - t.Parallel() - jsonNetwork := `{ - "ID": "8dfafdbc3a40", - "Name": "blah", - "Type": "bridge", - "Endpoints":[{"ID": "918c11c8288a", "Name": "dsafdsaf", "Network": "8dfafdbc3a40"}] - }` - var expected Network - err := json.Unmarshal([]byte(jsonNetwork), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonNetwork, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "8dfafdbc3a40" - network, err := client.NetworkInfo(id) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*network, expected) { - t.Errorf("NetworkInfo(%q): Expected %#v. Got %#v.", id, expected, network) - } - expectedURL, _ := url.Parse(client.getURL("/networks/8dfafdbc3a40")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - t.Errorf("NetworkInfo(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestNetworkCreate(t *testing.T) { - jsonID := `{"ID": "8dfafdbc3a40"}` - jsonNetwork := `{ - "ID": "8dfafdbc3a40", - "Name": "foobar", - "Driver": "bridge" - }` - var expected Network - err := json.Unmarshal([]byte(jsonNetwork), &expected) - if err != nil { - t.Fatal(err) - } - - client := newTestClient(&FakeRoundTripper{message: jsonID, status: http.StatusOK}) - opts := CreateNetworkOptions{Name: "foobar", Driver: "bridge"} - network, err := client.CreateNetwork(opts) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(*network, expected) { - t.Errorf("CreateNetwork: Expected %#v. Got %#v.", expected, network) - } -} - -func TestNetworkRemove(t *testing.T) { - t.Parallel() - id := "8dfafdbc3a40" - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - err := client.RemoveNetwork(id) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedMethod := "DELETE" - if req.Method != expectedMethod { - t.Errorf("RemoveNetwork(%q): Wrong HTTP method. Want %s. Got %s.", id, expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/networks/" + id)) - if req.URL.Path != u.Path { - t.Errorf("RemoveNetwork(%q): Wrong request path. Want %q. Got %q.", id, u.Path, req.URL.Path) - } -} - -func TestNetworkConnect(t *testing.T) { - t.Parallel() - id := "8dfafdbc3a40" - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - opts := NetworkConnectionOptions{Container: "foobar"} - err := client.ConnectNetwork(id, opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedMethod := "POST" - if req.Method != expectedMethod { - t.Errorf("ConnectNetwork(%q): Wrong HTTP method. Want %s. Got %s.", id, expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/networks/" + id + "/connect")) - if req.URL.Path != u.Path { - t.Errorf("ConnectNetwork(%q): Wrong request path. Want %q. 
Got %q.", id, u.Path, req.URL.Path) - } -} - -func TestNetworkConnectWithEndpoint(t *testing.T) { - t.Parallel() - wantJSON := `{"Container":"foobar","EndpointConfig":{"IPAMConfig":{"IPv4Address":"8.8.8.8"},"Links":null,"Aliases":null},"Force":false}` - var wantObj NetworkConnectionOptions - json.NewDecoder(bytes.NewBuffer([]byte(wantJSON))).Decode(&wantObj) - id := "8dfafdbc3a40" - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - opts := NetworkConnectionOptions{ - Container: "foobar", - EndpointConfig: &EndpointConfig{ - IPAMConfig: &EndpointIPAMConfig{ - IPv4Address: "8.8.8.8", - }, - }, - } - err := client.ConnectNetwork(id, opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedMethod := "POST" - if req.Method != expectedMethod { - t.Errorf("ConnectNetwork(%q): Wrong HTTP method. Want %s. Got %s.", id, expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/networks/" + id + "/connect")) - if req.URL.Path != u.Path { - t.Errorf("ConnectNetwork(%q): Wrong request path. Want %q. Got %q.", id, u.Path, req.URL.Path) - } - var in NetworkConnectionOptions - if err := json.NewDecoder(req.Body).Decode(&in); err != nil { - t.Errorf("ConnectNetwork: error parsing JSON data sent: %q", err) - } - if !reflect.DeepEqual(in, wantObj) { - t.Errorf("ConnectNetwork: wanted %#v send, got: %#v", wantObj, in) - } -} - -func TestNetworkConnectNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such network container", status: http.StatusNotFound}) - opts := NetworkConnectionOptions{Container: "foobar"} - err := client.ConnectNetwork("8dfafdbc3a40", opts) - if serr, ok := err.(*NoSuchNetworkOrContainer); !ok { - t.Errorf("ConnectNetwork: wrong error type: %s.", serr) - } -} - -func TestNetworkDisconnect(t *testing.T) { - t.Parallel() - id := "8dfafdbc3a40" - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - opts := NetworkConnectionOptions{Container: "foobar"} - err := client.DisconnectNetwork(id, opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedMethod := "POST" - if req.Method != expectedMethod { - t.Errorf("DisconnectNetwork(%q): Wrong HTTP method. Want %s. Got %s.", id, expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/networks/" + id + "/disconnect")) - if req.URL.Path != u.Path { - t.Errorf("DisconnectNetwork(%q): Wrong request path. Want %q. Got %q.", id, u.Path, req.URL.Path) - } -} - -func TestNetworkDisconnectNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such network container", status: http.StatusNotFound}) - opts := NetworkConnectionOptions{Container: "foobar"} - err := client.DisconnectNetwork("8dfafdbc3a40", opts) - if serr, ok := err.(*NoSuchNetworkOrContainer); !ok { - t.Errorf("DisconnectNetwork: wrong error type: %s.", serr) - } -} - -func TestPruneNetworks(t *testing.T) { - t.Parallel() - results := `{ - "NetworksDeleted": [ - "a", "b", "c" - ] - }` - - expected := &PruneNetworksResults{} - err := json.Unmarshal([]byte(results), expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: results, status: http.StatusOK}) - got, err := client.PruneNetworks(PruneNetworksOptions{}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, expected) { - t.Errorf("PruneNetworks: Expected %#v. 
Got %#v.", expected, got) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/plugin.go b/vendor/github.com/fsouza/go-dockerclient/plugin.go index a28ff3d1..95701590 100644 --- a/vendor/github.com/fsouza/go-dockerclient/plugin.go +++ b/vendor/github.com/fsouza/go-dockerclient/plugin.go @@ -143,7 +143,7 @@ type PluginDetail struct { ID string `json:"Id,omitempty" yaml:"Id,omitempty" toml:"Id,omitempty"` Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` Tag string `json:"Tag,omitempty" yaml:"Tag,omitempty" toml:"Tag,omitempty"` - Active bool `json:"Active,omitempty" yaml:"Active,omitempty" toml:"Active,omitempty"` + Active bool `json:"Enabled,omitempty" yaml:"Active,omitempty" toml:"Active,omitempty"` Settings PluginSettings `json:"Settings,omitempty" yaml:"Settings,omitempty" toml:"Settings,omitempty"` Config PluginConfig `json:"Config,omitempty" yaml:"Config,omitempty" toml:"Config,omitempty"` } diff --git a/vendor/github.com/fsouza/go-dockerclient/plugins_test.go b/vendor/github.com/fsouza/go-dockerclient/plugins_test.go deleted file mode 100644 index f6647827..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/plugins_test.go +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright 2018 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - - "reflect" - - "testing" -) - -var ( - expectPluginDetail = PluginDetail{ - ID: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078", - Name: "tiborvass/sample-volume-plugin", - Tag: "latest", - Active: true, - Settings: PluginSettings{ - Env: []string{"DEBUG=0"}, - Args: nil, - Devices: nil, - }, - Config: PluginConfig{ - Description: "A sample volume plugin for Docker", - Documentation: "https://docs.docker.com/engine/extend/plugins/", - Interface: PluginInterface{ - Types: []string{"docker.volumedriver/1.0"}, - Socket: "plugins.sock", - }, - Entrypoint: []string{ - "/usr/bin/sample-volume-plugin", - "/data", - }, - WorkDir: "", - User: PluginUser{}, - Network: PluginNetwork{Type: ""}, - Linux: PluginLinux{Capabilities: nil, AllowAllDevices: false, Devices: nil}, - Mounts: nil, - PropagatedMount: "/data", - Env: []PluginEnv{ - { - Name: "DEBUG", - Description: "If set, prints debug messages", - Settable: nil, - Value: "0", - }, - }, - Args: PluginArgs{ - Name: "args", - Description: "command line arguments", - Settable: nil, - Value: []string{}, - }, - }, - } -) - -const ( - jsonPluginDetail = `{ - "Id": "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078", - "Name": "tiborvass/sample-volume-plugin", - "Tag": "latest", - "Active": true, - "Settings": { - "Env": [ - "DEBUG=0" - ], - "Args": null, - "Devices": null - }, - "Config": { - "Description": "A sample volume plugin for Docker", - "Documentation": "https://docs.docker.com/engine/extend/plugins/", - "Interface": { - "Types": [ - "docker.volumedriver/1.0" - ], - "Socket": "plugins.sock" - }, - "Entrypoint": [ - "/usr/bin/sample-volume-plugin", - "/data" - ], - "WorkDir": "", - "User": {}, - "Network": { - "Type": "" - }, - "Linux": { - "Capabilities": null, - "AllowAllDevices": false, - "Devices": null - }, - "Mounts": null, - "PropagatedMount": "/data", - "Env": [ - { - "Name": "DEBUG", - "Description": "If set, prints debug messages", - "Settable": null, - "Value": "0" - } - ], - "Args": { - "Name": "args", - "Description": "command line arguments", - 
"Settable": null, - "Value": [] - } - } - }` -) - -func TestListPlugins(t *testing.T) { - t.Parallel() - jsonPlugins := fmt.Sprintf("[%s]", jsonPluginDetail) - var expected []PluginDetail - err := json.Unmarshal([]byte(jsonPlugins), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: jsonPlugins, status: http.StatusOK}) - pluginDetails, err := client.ListPlugins(context.Background()) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(pluginDetails, expected) { - t.Errorf("ListPlugins: Expected %#v. Got %#v.", expected, pluginDetails) - } -} - -func TestListFilteredPlugins(t *testing.T) { - t.Parallel() - jsonPlugins := fmt.Sprintf("[%s]", jsonPluginDetail) - var expected []PluginDetail - err := json.Unmarshal([]byte(jsonPlugins), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: jsonPlugins, status: http.StatusOK}) - - pluginDetails, err := client.ListFilteredPlugins( - ListFilteredPluginsOptions{ - Filters: map[string][]string{ - "capability": {"volumedriver"}, - "enabled": {"true"}, - }, - Context: context.Background()}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(pluginDetails, expected) { - t.Errorf("ListPlugins: Expected %#v. Got %#v.", expected, pluginDetails) - } -} - -func TestListFilteredPluginsFailure(t *testing.T) { - t.Parallel() - var tests = []struct { - status int - message string - }{ - {400, "bad parameter"}, - {500, "internal server error"}, - } - for _, tt := range tests { - client := newTestClient(&FakeRoundTripper{message: tt.message, status: tt.status}) - expected := Error{Status: tt.status, Message: tt.message} - pluginDetails, err := client.ListFilteredPlugins(ListFilteredPluginsOptions{}) - if !reflect.DeepEqual(expected, *err.(*Error)) { - t.Errorf("Wrong error in ListFilteredPlugins. Want %#v. Got %#v.", expected, err) - } - if len(pluginDetails) > 0 { - t.Errorf("ListFilteredPlugins failure. Expected empty list. Got %#v.", pluginDetails) - } - } -} - -func TestGetPluginPrivileges(t *testing.T) { - t.Parallel() - name := "test_plugin" - jsonPluginPrivileges := `[ { "Name": "network", "Description": "", "Value": [ "host" ] }]` - fakeRT := &FakeRoundTripper{message: jsonPluginPrivileges, status: http.StatusNoContent} - client := newTestClient(fakeRT) - expected := []PluginPrivilege{ - { - Name: "network", - Description: "", - Value: []string{"host"}, - }} - pluginPrivileges, err := client.GetPluginPrivileges(name, context.Background()) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(pluginPrivileges, expected) { - t.Errorf("PluginPrivileges: Expected %#v. 
Got %#v.", expected, pluginPrivileges) - } -} - -func TestInstallPlugins(t *testing.T) { - opts := InstallPluginOptions{ - Remote: "", Name: "test", - Plugins: []PluginPrivilege{ - { - Name: "network", - Description: "", - Value: []string{"host"}, - }, - }, - Context: context.Background(), - Auth: AuthConfiguration{}, - } - - client := newTestClient(&FakeRoundTripper{message: "", status: http.StatusOK}) - err := client.InstallPlugins(opts) - if err != nil { - t.Fatal(err) - } -} - -func TestInspectPlugin(t *testing.T) { - name := "test_plugin" - fakeRT := &FakeRoundTripper{message: jsonPluginDetail, status: http.StatusNoContent} - client := newTestClient(fakeRT) - pluginPrivileges, err := client.InspectPlugins(name, context.Background()) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(pluginPrivileges, &expectPluginDetail) { - t.Errorf("InspectPlugins: Expected %#v. Got %#v.", &expectPluginDetail, pluginPrivileges) - } -} - -func TestRemovePlugin(t *testing.T) { - opts := RemovePluginOptions{ - Name: "test_plugin", - Force: false, - Context: context.Background(), - } - fakeRT := &FakeRoundTripper{message: jsonPluginDetail, status: http.StatusNoContent} - client := newTestClient(fakeRT) - pluginPrivileges, err := client.RemovePlugin(opts) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(pluginPrivileges, &expectPluginDetail) { - t.Errorf("RemovePlugin: Expected %#v. Got %#v.", &expectPluginDetail, pluginPrivileges) - } -} - -func TestEnablePlugin(t *testing.T) { - opts := EnablePluginOptions{ - Name: "test", - Timeout: 5, - Context: context.Background(), - } - client := newTestClient(&FakeRoundTripper{message: "", status: http.StatusOK}) - err := client.EnablePlugin(opts) - if err != nil { - t.Fatal(err) - } -} - -func TestDisablePlugin(t *testing.T) { - opts := DisablePluginOptions{ - Name: "test", - Context: context.Background(), - } - client := newTestClient(&FakeRoundTripper{message: "", status: http.StatusOK}) - err := client.DisablePlugin(opts) - if err != nil { - t.Fatal(err) - } -} - -func TestCreatePlugin(t *testing.T) { - opts := CreatePluginOptions{ - Name: "test", - Path: "", - Context: context.Background(), - } - client := newTestClient(&FakeRoundTripper{message: "", status: http.StatusOK}) - _, err := client.CreatePlugin(opts) - if err != nil { - t.Fatal(err) - } -} - -func TestPushPlugin(t *testing.T) { - opts := PushPluginOptions{ - Name: "test", - Context: context.Background(), - } - client := newTestClient(&FakeRoundTripper{message: "", status: http.StatusOK}) - err := client.PushPlugin(opts) - if err != nil { - t.Fatal(err) - } -} - -func TestConfigurePlugin(t *testing.T) { - opts := ConfigurePluginOptions{ - Name: "test", - Envs: []string{}, - Context: context.Background(), - } - client := newTestClient(&FakeRoundTripper{message: "", status: http.StatusOK}) - err := client.ConfigurePlugin(opts) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_configs_test.go b/vendor/github.com/fsouza/go-dockerclient/swarm_configs_test.go deleted file mode 100644 index f8b445fd..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/swarm_configs_test.go +++ /dev/null @@ -1,251 +0,0 @@ -// Copyright 2017 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package docker - -import ( - "encoding/base64" - "encoding/json" - "net/http" - "net/url" - "reflect" - "testing" - - "github.com/docker/docker/api/types/swarm" -) - -func TestCreateConfig(t *testing.T) { - t.Parallel() - result := `{ - "Id": "d1c00f91353ab0fe368363fab76d124cc764f2db8e11832f89f5ce21c2ece675" -}` - var expected swarm.Config - err := json.Unmarshal([]byte(result), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: result, status: http.StatusOK} - client := newTestClient(fakeRT) - opts := CreateConfigOptions{} - config, err := client.CreateConfig(opts) - if err != nil { - t.Fatal(err) - } - id := "d1c00f91353ab0fe368363fab76d124cc764f2db8e11832f89f5ce21c2ece675" - if config.ID != id { - t.Errorf("CreateConfig: wrong ID. Want %q. Got %q.", id, config.ID) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("CreateConfig: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/configs/create")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("CreateConfig: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) - } - var gotBody Config - err = json.NewDecoder(req.Body).Decode(&gotBody) - if err != nil { - t.Fatal(err) - } -} - -func TestRemoveConfig(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "d1c00f91353ab0fe368363fab76d124cc764f2db8e11832f89f5ce21c2ece675" - opts := RemoveConfigOptions{ID: id} - err := client.RemoveConfig(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "DELETE" { - t.Errorf("RemoveConfig(%q): wrong HTTP method. Want %q. Got %q.", id, "DELETE", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/configs/" + id)) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("RemoveConfig(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestRemoveConfigNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such config", status: http.StatusNotFound}) - err := client.RemoveConfig(RemoveConfigOptions{ID: "a2334"}) - expected := &NoSuchConfig{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("RemoveConfig: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestUpdateConfig(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "d1c00f91353ab0fe368363fab76d124cc764f2db8e11832f89f5ce21c2ece675" - update := UpdateConfigOptions{Version: 23} - err := client.UpdateConfig(id, update) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("UpdateConfig: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/configs/" + id + "/update?version=23")) - if gotURI := req.URL.RequestURI(); gotURI != expectedURL.RequestURI() { - t.Errorf("UpdateConfig: Wrong path in request. Want %q. Got %q.", expectedURL.RequestURI(), gotURI) - } - expectedContentType := "application/json" - if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { - t.Errorf("UpdateConfig: Wrong content-type in request. Want %q. 
Got %q.", expectedContentType, contentType) - } - var out UpdateConfigOptions - if err := json.NewDecoder(req.Body).Decode(&out); err != nil { - t.Fatal(err) - } - update.Version = 0 - if !reflect.DeepEqual(out, update) { - t.Errorf("UpdateConfig: wrong body\ngot %#v\nwant %#v", out, update) - } -} - -func TestUpdateConfigWithAuthentication(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "d1c00f91353ab0fe368363fab76d124cc764f2db8e11832f89f5ce21c2ece675" - update := UpdateConfigOptions{Version: 23} - update.Auth = AuthConfiguration{ - Username: "gopher", - Password: "gopher123", - Email: "gopher@tsuru.io", - } - - err := client.UpdateConfig(id, update) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("UpdateConfig: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/configs/" + id + "/update?version=23")) - if gotURI := req.URL.RequestURI(); gotURI != expectedURL.RequestURI() { - t.Errorf("UpdateConfig: Wrong path in request. Want %q. Got %q.", expectedURL.RequestURI(), gotURI) - } - expectedContentType := "application/json" - if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { - t.Errorf("UpdateConfig: Wrong content-type in request. Want %q. Got %q.", expectedContentType, contentType) - } - var out UpdateConfigOptions - if err := json.NewDecoder(req.Body).Decode(&out); err != nil { - t.Fatal(err) - } - var updateAuth AuthConfiguration - - auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth")) - if err != nil { - t.Errorf("UpdateConfig: caught error decoding auth. %#v", err.Error()) - } - - err = json.Unmarshal(auth, &updateAuth) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(updateAuth, update.Auth) { - t.Errorf("UpdateConfig: wrong auth configuration. Want %#v. Got %#v", update.Auth, updateAuth) - } -} - -func TestUpdateConfigNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such Config", status: http.StatusNotFound}) - update := UpdateConfigOptions{} - err := client.UpdateConfig("notfound", update) - expected := &NoSuchConfig{ID: "notfound"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("UpdateConfig: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestInspectConfigNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such config", status: http.StatusNotFound}) - config, err := client.InspectConfig("notfound") - if config != nil { - t.Errorf("InspectConfig: Expected Config, got %#v", config) - } - expected := &NoSuchConfig{ID: "notfound"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("InspectConfig: Wrong error returned. Want %#v. 
Got %#v.", expected, err) - } -} - -func TestInspectConfig(t *testing.T) { - t.Parallel() - jsonConfig := `{ - "ID": "ktnbjxoalbkvbvedmg1urrz8h", - "Version": { - "Index": 11 - }, - "CreatedAt": "2016-11-05T01:20:17.327670065Z", - "UpdatedAt": "2016-11-05T01:20:17.327670065Z", - "Spec": { - "Name": "app-dev.crt" - } -}` - var expected swarm.Config - err := json.Unmarshal([]byte(jsonConfig), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonConfig, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "ktnbjxoalbkvbvedmg1urrz8h" - config, err := client.InspectConfig(id) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*config, expected) { - t.Errorf("InspectConfig(%q): Expected %#v. Got %#v.", id, expected, config) - } - expectedURL, _ := url.Parse(client.getURL("/configs/ktnbjxoalbkvbvedmg1urrz8h")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - t.Errorf("InspectConfig(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestListConfigs(t *testing.T) { - t.Parallel() - jsonConfigs := `[ - { - "ID": "ktnbjxoalbkvbvedmg1urrz8h", - "Version": { - "Index": 11 - }, - "CreatedAt": "2016-11-05T01:20:17.327670065Z", - "UpdatedAt": "2016-11-05T01:20:17.327670065Z", - "Spec": { - "Name": "server.conf" - } - } -]` - var expected []swarm.Config - err := json.Unmarshal([]byte(jsonConfigs), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: jsonConfigs, status: http.StatusOK}) - configs, err := client.ListConfigs(ListConfigsOptions{}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(configs, expected) { - t.Errorf("ListConfigs: Expected %#v. Got %#v.", expected, configs) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_node_test.go b/vendor/github.com/fsouza/go-dockerclient/swarm_node_test.go deleted file mode 100644 index 9236a36c..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/swarm_node_test.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2016 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package docker - -import ( - "encoding/json" - "net/http" - "net/url" - "reflect" - "testing" - - "github.com/docker/docker/api/types/swarm" -) - -func TestListNodes(t *testing.T) { - t.Parallel() - jsonNodes := `[ - { - "ID": "24ifsmvkjbyhk", - "Version": { - "Index": 8 - }, - "CreatedAt": "2016-06-07T20:31:11.853781916Z", - "UpdatedAt": "2016-06-07T20:31:11.999868824Z", - "Spec": { - "Name": "my-node", - "Role": "manager", - "Availability": "active", - "Labels": { - "foo": "bar" - } - }, - "Description": { - "Hostname": "bf3067039e47", - "Platform": { - "Architecture": "x86_64", - "OS": "linux" - }, - "Resources": { - "NanoCPUs": 4000000000, - "MemoryBytes": 8272408576 - }, - "Engine": { - "EngineVersion": "1.12.0-dev", - "Labels": { - "foo": "bar" - }, - "Plugins": [ - { - "Type": "Volume", - "Name": "local" - }, - { - "Type": "Network", - "Name": "bridge" - }, - { - "Type": "Network", - "Name": "null" - }, - { - "Type": "Network", - "Name": "overlay" - } - ] - } - }, - "Status": { - "State": "ready" - }, - "ManagerStatus": { - "Leader": true, - "Reachability": "reachable", - "Addr": "172.17.0.2:2377" - } - } -]` - var expected []swarm.Node - err := json.Unmarshal([]byte(jsonNodes), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: jsonNodes, status: http.StatusOK}) - nodes, err := client.ListNodes(ListNodesOptions{}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(nodes, expected) { - t.Errorf("ListNodes: Expected %#v. Got %#v.", expected, nodes) - } - -} - -func TestInspectNode(t *testing.T) { - t.Parallel() - jsonNode := `{ - "ID": "24ifsmvkjbyhk", - "Version": { - "Index": 8 - }, - "CreatedAt": "2016-06-07T20:31:11.853781916Z", - "UpdatedAt": "2016-06-07T20:31:11.999868824Z", - "Spec": { - "Name": "my-node", - "Role": "manager", - "Availability": "active", - "Labels": { - "foo": "bar" - } - }, - "Description": { - "Hostname": "bf3067039e47", - "Platform": { - "Architecture": "x86_64", - "OS": "linux" - }, - "Resources": { - "NanoCPUs": 4000000000, - "MemoryBytes": 8272408576 - }, - "Engine": { - "EngineVersion": "1.12.0-dev", - "Labels": { - "foo": "bar" - }, - "Plugins": [ - { - "Type": "Volume", - "Name": "local" - }, - { - "Type": "Network", - "Name": "bridge" - }, - { - "Type": "Network", - "Name": "null" - }, - { - "Type": "Network", - "Name": "overlay" - } - ] - } - }, - "Status": { - "State": "ready" - }, - "ManagerStatus": { - "Leader": true, - "Reachability": "reachable", - "Addr": "172.17.0.2:2377" - } -}` - - var expected swarm.Node - err := json.Unmarshal([]byte(jsonNode), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonNode, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "24ifsmvkjbyhk" - node, err := client.InspectNode(id) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*node, expected) { - t.Errorf("InspectNode(%q): Expected %#v. Got %#v.", id, expected, node) - } - expectedURL, _ := url.Parse(client.getURL("/nodes/24ifsmvkjbyhk")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - t.Errorf("InspectNode(%q): Wrong path in request. Want %q. 
Got %q.", id, expectedURL.Path, gotPath) - } - -} - -func TestInspectNodeNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such node", status: http.StatusNotFound}) - node, err := client.InspectNode("notfound") - if node != nil { - t.Errorf("InspectNode: Expected task, got %#v", node) - } - expected := &NoSuchNode{ID: "notfound"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("InspectNode: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestUpdateNode(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - opts := UpdateNodeOptions{} - err := client.UpdateNode(id, opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("UpdateNode: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/nodes/" + id + "/update")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("UpdateNode: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) - } - expectedContentType := "application/json" - if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { - t.Errorf("UpdateNode: Wrong content-type in request. Want %q. Got %q.", expectedContentType, contentType) - } - var out UpdateNodeOptions - if err := json.NewDecoder(req.Body).Decode(&out); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(out, opts) { - t.Errorf("UpdateNode: wrong body, got: %#v, want %#v", out, opts) - } -} - -func TestUpdateNodeNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such node", status: http.StatusNotFound}) - err := client.UpdateNode("notfound", UpdateNodeOptions{}) - expected := &NoSuchNode{ID: "notfound"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("UpdateNode: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestRemoveNode(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.RemoveNode(RemoveNodeOptions{ID: id}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "DELETE" { - t.Errorf("RemoveNode(%q): wrong HTTP method. Want %q. Got %q.", id, "DELETE", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/nodes/" + id)) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("RemoveNode(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestRemoveNodeNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such node", status: http.StatusNotFound}) - err := client.RemoveNode(RemoveNodeOptions{ID: "notfound"}) - expected := &NoSuchNode{ID: "notfound"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("RemoveNode: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_secrets_test.go b/vendor/github.com/fsouza/go-dockerclient/swarm_secrets_test.go deleted file mode 100644 index 6b4e6f4b..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/swarm_secrets_test.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2017 go-dockerclient authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/base64" - "encoding/json" - "net/http" - "net/url" - "reflect" - "testing" - - "github.com/docker/docker/api/types/swarm" -) - -func TestCreateSecret(t *testing.T) { - t.Parallel() - result := `{ - "Id": "13417726f7654bc286201f7c9accc98ccbd190efcc4753bf8ecfc0b61ef3dde8" -}` - var expected swarm.Secret - err := json.Unmarshal([]byte(result), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: result, status: http.StatusOK} - client := newTestClient(fakeRT) - opts := CreateSecretOptions{} - secret, err := client.CreateSecret(opts) - if err != nil { - t.Fatal(err) - } - id := "13417726f7654bc286201f7c9accc98ccbd190efcc4753bf8ecfc0b61ef3dde8" - if secret.ID != id { - t.Errorf("CreateSecret: wrong ID. Want %q. Got %q.", id, secret.ID) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("CreateSecret: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/secrets/create")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("CreateSecret: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) - } - var gotBody Config - err = json.NewDecoder(req.Body).Decode(&gotBody) - if err != nil { - t.Fatal(err) - } -} - -func TestRemoveSecret(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "13417726f7654bc286201f7c9accc98ccbd190efcc4753bf8ecfc0b61ef3dde8" - opts := RemoveSecretOptions{ID: id} - err := client.RemoveSecret(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "DELETE" { - t.Errorf("RemoveSecret(%q): wrong HTTP method. Want %q. Got %q.", id, "DELETE", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/secrets/" + id)) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("RemoveSecret(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestRemoveSecretNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such secret", status: http.StatusNotFound}) - err := client.RemoveSecret(RemoveSecretOptions{ID: "a2334"}) - expected := &NoSuchSecret{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("RemoveSecret: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestUpdateSecret(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "13417726f7654bc286201f7c9accc98ccbd190efcc4753bf8ecfc0b61ef3dde8" - update := UpdateSecretOptions{Version: 23} - err := client.UpdateSecret(id, update) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("UpdateSecret: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/secrets/" + id + "/update?version=23")) - if gotURI := req.URL.RequestURI(); gotURI != expectedURL.RequestURI() { - t.Errorf("UpdateSecret: Wrong path in request. Want %q. Got %q.", expectedURL.RequestURI(), gotURI) - } - expectedContentType := "application/json" - if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { - t.Errorf("UpdateSecret: Wrong content-type in request. Want %q. 
Got %q.", expectedContentType, contentType) - } - var out UpdateSecretOptions - if err := json.NewDecoder(req.Body).Decode(&out); err != nil { - t.Fatal(err) - } - update.Version = 0 - if !reflect.DeepEqual(out, update) { - t.Errorf("UpdateSecret: wrong body\ngot %#v\nwant %#v", out, update) - } -} - -func TestUpdateSecretWithAuthentication(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "13417726f7654bc286201f7c9accc98ccbd190efcc4753bf8ecfc0b61ef3dde8" - update := UpdateSecretOptions{Version: 23} - update.Auth = AuthConfiguration{ - Username: "gopher", - Password: "gopher123", - Email: "gopher@tsuru.io", - } - - err := client.UpdateSecret(id, update) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("UpdateSecret: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/secrets/" + id + "/update?version=23")) - if gotURI := req.URL.RequestURI(); gotURI != expectedURL.RequestURI() { - t.Errorf("UpdateSecret: Wrong path in request. Want %q. Got %q.", expectedURL.RequestURI(), gotURI) - } - expectedContentType := "application/json" - if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { - t.Errorf("UpdateSecret: Wrong content-type in request. Want %q. Got %q.", expectedContentType, contentType) - } - var out UpdateSecretOptions - if err := json.NewDecoder(req.Body).Decode(&out); err != nil { - t.Fatal(err) - } - var updateAuth AuthConfiguration - - auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth")) - if err != nil { - t.Errorf("UpdateSecret: caught error decoding auth. %#v", err.Error()) - } - - err = json.Unmarshal(auth, &updateAuth) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(updateAuth, update.Auth) { - t.Errorf("UpdateSecret: wrong auth configuration. Want %#v. Got %#v", update.Auth, updateAuth) - } -} - -func TestUpdateSecretNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such Secret", status: http.StatusNotFound}) - update := UpdateSecretOptions{} - err := client.UpdateSecret("notfound", update) - expected := &NoSuchSecret{ID: "notfound"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("UpdateSecret: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestInspectSecretNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such secret", status: http.StatusNotFound}) - secret, err := client.InspectSecret("notfound") - if secret != nil { - t.Errorf("InspectSecret: Expected Secret, got %#v", secret) - } - expected := &NoSuchSecret{ID: "notfound"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("InspectSecret: Wrong error returned. Want %#v. 
Got %#v.", expected, err) - } -} - -func TestInspectSecret(t *testing.T) { - t.Parallel() - jsonSecret := `{ - "ID": "ak7w3gjqoa3kuz8xcpnyy0pvl", - "Version": { - "Index": 11 - }, - "CreatedAt": "2016-11-05T01:20:17.327670065Z", - "UpdatedAt": "2016-11-05T01:20:17.327670065Z", - "Spec": { - "Name": "app-dev.crt", - "Labels": { - "foo": "bar" - }, - "Driver": { - "Name": "secret-bucket", - "Options": { - "OptionA": "value for driver option A", - "OptionB": "value for driver option B" - } - } - } -}` - var expected swarm.Secret - err := json.Unmarshal([]byte(jsonSecret), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonSecret, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "ak7w3gjqoa3kuz8xcpnyy0pvl" - secret, err := client.InspectSecret(id) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*secret, expected) { - t.Errorf("InspectSecret(%q): Expected %#v. Got %#v.", id, expected, secret) - } - expectedURL, _ := url.Parse(client.getURL("/secrets/ak7w3gjqoa3kuz8xcpnyy0pvl")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - t.Errorf("InspectSecret(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestListSecrets(t *testing.T) { - t.Parallel() - jsonSecrets := `[ - { - "ID": "blt1owaxmitz71s9v5zh81zun", - "Version": { - "Index": 85 - }, - "CreatedAt": "2017-07-20T13:55:28.678958722Z", - "UpdatedAt": "2017-07-20T13:55:28.678958722Z", - "Spec": { - "Name": "mysql-passwd", - "Labels": { - "some.label": "some.value" - }, - "Driver": { - "Name": "secret-bucket", - "Options": { - "OptionA": "value for driver option A", - "OptionB": "value for driver option B" - } - } - } - }, - { - "ID": "ktnbjxoalbkvbvedmg1urrz8h", - "Version": { - "Index": 11 - }, - "CreatedAt": "2016-11-05T01:20:17.327670065Z", - "UpdatedAt": "2016-11-05T01:20:17.327670065Z", - "Spec": { - "Name": "app-dev.crt", - "Labels": { - "foo": "bar" - } - } - } -]` - var expected []swarm.Secret - err := json.Unmarshal([]byte(jsonSecrets), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: jsonSecrets, status: http.StatusOK}) - secrets, err := client.ListSecrets(ListSecretsOptions{}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(secrets, expected) { - t.Errorf("ListSecrets: Expected %#v. Got %#v.", expected, secrets) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_service_test.go b/vendor/github.com/fsouza/go-dockerclient/swarm_service_test.go deleted file mode 100644 index f5eca0d8..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/swarm_service_test.go +++ /dev/null @@ -1,620 +0,0 @@ -// Copyright 2016 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package docker - -import ( - "bytes" - "encoding/json" - "net/http" - "net/http/httptest" - "net/url" - "reflect" - "testing" - - "encoding/base64" - "strings" - - "github.com/docker/docker/api/types/swarm" -) - -func TestCreateService(t *testing.T) { - t.Parallel() - result := `{ - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" -}` - var expected swarm.Service - err := json.Unmarshal([]byte(result), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: result, status: http.StatusOK} - client := newTestClient(fakeRT) - opts := CreateServiceOptions{} - service, err := client.CreateService(opts) - if err != nil { - t.Fatal(err) - } - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - if service.ID != id { - t.Errorf("CreateServce: wrong ID. Want %q. Got %q.", id, service.ID) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("CreateService: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/services/create")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("CreateService: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) - } - var gotBody Config - err = json.NewDecoder(req.Body).Decode(&gotBody) - if err != nil { - t.Fatal(err) - } - - auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth")) - if err != nil { - t.Errorf("CreateService: caught error decoding auth. %#v", err.Error()) - } - if strings.TrimSpace(string(auth)) != "{}" { - t.Errorf("CreateService: wrong body. Want %q. Got %q.", - base64.URLEncoding.EncodeToString([]byte("{}")), req.Header.Get("X-Registry-Auth")) - } -} - -func TestCreateServiceWithAuthentication(t *testing.T) { - t.Parallel() - result := `{ - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" -}` - var expected swarm.Service - err := json.Unmarshal([]byte(result), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: result, status: http.StatusOK} - client := newTestClient(fakeRT) - opts := CreateServiceOptions{} - opts.Auth = AuthConfiguration{ - Username: "gopher", - Password: "gopher123", - Email: "gopher@tsuru.io", - } - service, err := client.CreateService(opts) - if err != nil { - t.Fatal(err) - } - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - if service.ID != id { - t.Errorf("CreateServce: wrong ID. Want %q. Got %q.", id, service.ID) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("CreateService: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/services/create")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("CreateService: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) - } - var gotBody Config - err = json.NewDecoder(req.Body).Decode(&gotBody) - if err != nil { - t.Fatal(err) - } - var gotAuth AuthConfiguration - - auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth")) - if err != nil { - t.Errorf("CreateService: caught error decoding auth. %#v", err.Error()) - } - - err = json.Unmarshal(auth, &gotAuth) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(gotAuth, opts.Auth) { - t.Errorf("CreateService: wrong auth configuration. Want %#v. 
Got %#v.", opts.Auth, gotAuth) - } -} - -func TestRemoveService(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - opts := RemoveServiceOptions{ID: id} - err := client.RemoveService(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "DELETE" { - t.Errorf("RemoveService(%q): wrong HTTP method. Want %q. Got %q.", id, "DELETE", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/services/" + id)) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("RemoveService(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestRemoveServiceNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such service", status: http.StatusNotFound}) - err := client.RemoveService(RemoveServiceOptions{ID: "a2334"}) - expected := &NoSuchService{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("RemoveService: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestUpdateService(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - update := UpdateServiceOptions{Version: 23} - err := client.UpdateService(id, update) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("UpdateService: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/services/" + id + "/update?version=23")) - if gotURI := req.URL.RequestURI(); gotURI != expectedURL.RequestURI() { - t.Errorf("UpdateService: Wrong path in request. Want %q. Got %q.", expectedURL.RequestURI(), gotURI) - } - expectedContentType := "application/json" - if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { - t.Errorf("UpdateService: Wrong content-type in request. Want %q. Got %q.", expectedContentType, contentType) - } - var out UpdateServiceOptions - if err := json.NewDecoder(req.Body).Decode(&out); err != nil { - t.Fatal(err) - } - update.Version = 0 - if !reflect.DeepEqual(out, update) { - t.Errorf("UpdateService: wrong body\ngot %#v\nwant %#v", out, update) - } -} - -func TestUpdateServiceRollback(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - update := UpdateServiceOptions{Version: 23, Rollback: "previous"} - err := client.UpdateService(id, update) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("UpdateService: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/services/" + id + "/update?version=23&rollback=previous")) - if req.URL.Path != expectedURL.Path { - t.Errorf("UpdateService: Wrong path in request. Want %q. Got %q.", expectedURL.Path, req.URL.Path) - } - if !reflect.DeepEqual(req.URL.Query(), expectedURL.Query()) { - t.Errorf("UpdateService: Wrong querystring in request. Want %v. 
Got %v.", expectedURL.Query(), req.URL.Query()) - } -} - -func TestUpdateServiceWithAuthentication(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - update := UpdateServiceOptions{Version: 23} - update.Auth = AuthConfiguration{ - Username: "gopher", - Password: "gopher123", - Email: "gopher@tsuru.io", - } - - err := client.UpdateService(id, update) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("UpdateService: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/services/" + id + "/update?version=23")) - if gotURI := req.URL.RequestURI(); gotURI != expectedURL.RequestURI() { - t.Errorf("UpdateService: Wrong path in request. Want %q. Got %q.", expectedURL.RequestURI(), gotURI) - } - expectedContentType := "application/json" - if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { - t.Errorf("UpdateService: Wrong content-type in request. Want %q. Got %q.", expectedContentType, contentType) - } - var out UpdateServiceOptions - if err := json.NewDecoder(req.Body).Decode(&out); err != nil { - t.Fatal(err) - } - var updateAuth AuthConfiguration - - auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth")) - if err != nil { - t.Errorf("UpdateService: caught error decoding auth. %#v", err.Error()) - } - - err = json.Unmarshal(auth, &updateAuth) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(updateAuth, update.Auth) { - t.Errorf("UpdateService: wrong auth configuration. Want %#v. Got %#v", update.Auth, updateAuth) - } -} - -func TestUpdateServiceNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such service", status: http.StatusNotFound}) - update := UpdateServiceOptions{} - err := client.UpdateService("notfound", update) - expected := &NoSuchService{ID: "notfound"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("UpdateService: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestInspectServiceNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such service", status: http.StatusNotFound}) - service, err := client.InspectService("notfound") - if service != nil { - t.Errorf("InspectService: Expected service, got %#v", service) - } - expected := &NoSuchService{ID: "notfound"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("InspectService: Wrong error returned. Want %#v. 
Got %#v.", expected, err) - } -} - -func TestInspectService(t *testing.T) { - t.Parallel() - jsonService := `{ - "ID": "ak7w3gjqoa3kuz8xcpnyy0pvl", - "Version": { - "Index": 95 - }, - "CreatedAt": "2016-06-07T21:10:20.269723157Z", - "UpdatedAt": "2016-06-07T21:10:20.276301259Z", - "Spec": { - "Name": "redis", - "Task": { - "ContainerSpec": { - "Image": "redis" - }, - "Resources": { - "Limits": {}, - "Reservations": {} - }, - "RestartPolicy": { - "Condition": "ANY" - }, - "Placement": {} - }, - "Mode": { - "Replicated": { - "Replicas": 1 - } - }, - "UpdateConfig": { - "Parallelism": 1 - }, - "EndpointSpec": { - "Mode": "VIP", - "Ingress": "PUBLICPORT", - "ExposedPorts": [ - { - "Protocol": "tcp", - "Port": 6379 - } - ] - } - }, - "Endpoint": { - "Spec": {}, - "ExposedPorts": [ - { - "Protocol": "tcp", - "Port": 6379, - "PublicPort": 30001 - } - ], - "VirtualIPs": [ - { - "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", - "Addr": "10.255.0.4/16" - } - ] - } -}` - var expected swarm.Service - err := json.Unmarshal([]byte(jsonService), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonService, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "ak7w3gjqoa3kuz8xcpnyy0pvl" - service, err := client.InspectService(id) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*service, expected) { - t.Errorf("InspectService(%q): Expected %#v. Got %#v.", id, expected, service) - } - expectedURL, _ := url.Parse(client.getURL("/services/ak7w3gjqoa3kuz8xcpnyy0pvl")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - t.Errorf("InspectService(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestListServices(t *testing.T) { - t.Parallel() - jsonServices := `[ - { - "ID": "9mnpnzenvg8p8tdbtq4wvbkcz", - "Version": { - "Index": 19 - }, - "CreatedAt": "2016-06-07T21:05:51.880065305Z", - "UpdatedAt": "2016-06-07T21:07:29.962229872Z", - "Spec": { - "Name": "hopeful_cori", - "TaskTemplate": { - "ContainerSpec": { - "Image": "redis" - }, - "Resources": { - "Limits": {}, - "Reservations": {} - }, - "RestartPolicy": { - "Condition": "ANY" - }, - "Placement": {} - }, - "Mode": { - "Replicated": { - "Replicas": 1 - } - }, - "UpdateConfig": { - "Parallelism": 1 - }, - "EndpointSpec": { - "Mode": "VIP", - "Ingress": "PUBLICPORT", - "ExposedPorts": [ - { - "Protocol": "tcp", - "Port": 6379 - } - ] - } - }, - "Endpoint": { - "Spec": {}, - "ExposedPorts": [ - { - "Protocol": "tcp", - "Port": 6379, - "PublicPort": 30000 - } - ], - "VirtualIPs": [ - { - "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", - "Addr": "10.255.0.2/16" - }, - { - "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", - "Addr": "10.255.0.3/16" - } - ] - } - } -]` - var expected []swarm.Service - err := json.Unmarshal([]byte(jsonServices), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: jsonServices, status: http.StatusOK}) - services, err := client.ListServices(ListServicesOptions{}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(services, expected) { - t.Errorf("ListServices: Expected %#v. 
Got %#v.", expected, services) - } -} - -/// ##################################################"" - -func TestGetServiceLogs(t *testing.T) { - var req http.Request - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} - w.Write(prefix) - w.Write([]byte("something happened!")) - req = *r - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var buf bytes.Buffer - opts := LogsServiceOptions{ - Service: "a123456", - OutputStream: &buf, - Follow: true, - Stdout: true, - Stderr: true, - Timestamps: true, - } - err := client.GetServiceLogs(opts) - if err != nil { - t.Fatal(err) - } - expected := "something happened!" - if buf.String() != expected { - t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) - } - if req.Method != "GET" { - t.Errorf("Logs: wrong HTTP method. Want GET. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/services/a123456/logs")) - if req.URL.Path != u.Path { - t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path) - } - expectedQs := map[string][]string{ - "follow": {"1"}, - "stdout": {"1"}, - "stderr": {"1"}, - "timestamps": {"1"}, - "tail": {"all"}, - } - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expectedQs) { - t.Errorf("Logs: wrong query string. Want %#v. Got %#v.", expectedQs, got) - } -} - -func TesGetServicetLogsNilStdoutDoesntFail(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} - w.Write(prefix) - w.Write([]byte("something happened!")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - opts := LogsServiceOptions{ - Service: "a123456", - Follow: true, - Stdout: true, - Stderr: true, - Timestamps: true, - } - err := client.GetServiceLogs(opts) - if err != nil { - t.Fatal(err) - } -} - -func TestGetServiceLogsNilStderrDoesntFail(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - prefix := []byte{2, 0, 0, 0, 0, 0, 0, 19} - w.Write(prefix) - w.Write([]byte("something happened!")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - opts := LogsServiceOptions{ - Service: "a123456", - Follow: true, - Stdout: true, - Stderr: true, - Timestamps: true, - } - err := client.GetServiceLogs(opts) - if err != nil { - t.Fatal(err) - } -} - -func TestGetServiceLogsSpecifyingTail(t *testing.T) { - var req http.Request - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} - w.Write(prefix) - w.Write([]byte("something happened!")) - req = *r - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var buf bytes.Buffer - opts := LogsServiceOptions{ - Service: "a123456", - OutputStream: &buf, - Follow: true, - Stdout: true, - Stderr: true, - Timestamps: true, - Tail: "100", - } - err := client.GetServiceLogs(opts) - if err != nil { - t.Fatal(err) - } - expected := "something happened!" - if buf.String() != expected { - t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) - } - if req.Method != "GET" { - t.Errorf("Logs: wrong HTTP method. Want GET. 
Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/services/a123456/logs")) - if req.URL.Path != u.Path { - t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path) - } - expectedQs := map[string][]string{ - "follow": {"1"}, - "stdout": {"1"}, - "stderr": {"1"}, - "timestamps": {"1"}, - "tail": {"100"}, - } - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expectedQs) { - t.Errorf("Logs: wrong query string. Want %#v. Got %#v.", expectedQs, got) - } -} - -func TestGetServiceLogsRawTerminal(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("something happened!")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var buf bytes.Buffer - opts := LogsServiceOptions{ - Service: "a123456", - OutputStream: &buf, - Follow: true, - RawTerminal: true, - Stdout: true, - Stderr: true, - Timestamps: true, - Tail: "100", - } - err := client.GetServiceLogs(opts) - if err != nil { - t.Fatal(err) - } - expected := "something happened!" - if buf.String() != expected { - t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) - } -} - -func TestGetServiceLogsNoContainer(t *testing.T) { - var client Client - err := client.GetServiceLogs(LogsServiceOptions{}) - expected := &NoSuchService{ID: ""} - if !reflect.DeepEqual(err, expected) { - t.Errorf("AttachToContainer: wrong error. Want %#v. Got %#v.", expected, err) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_task_test.go b/vendor/github.com/fsouza/go-dockerclient/swarm_task_test.go deleted file mode 100644 index 92083023..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/swarm_task_test.go +++ /dev/null @@ -1,376 +0,0 @@ -// Copyright 2016 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package docker - -import ( - "encoding/json" - "net/http" - "net/url" - "reflect" - "testing" - - "github.com/docker/docker/api/types/swarm" -) - -func TestListTasks(t *testing.T) { - t.Parallel() - jsonTasks := `[ - { - "ID": "0kzzo1i0y4jz6027t0k7aezc7", - "Version": { - "Index": 71 - }, - "CreatedAt": "2016-06-07T21:07:31.171892745Z", - "UpdatedAt": "2016-06-07T21:07:31.376370513Z", - "Name": "hopeful_cori", - "Spec": { - "ContainerSpec": { - "Image": "redis" - }, - "Resources": { - "Limits": {}, - "Reservations": {} - }, - "RestartPolicy": { - "Condition": "ANY" - }, - "Placement": {} - }, - "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz", - "Instance": 1, - "NodeID": "24ifsmvkjbyhk", - "ServiceAnnotations": {}, - "Status": { - "Timestamp": "2016-06-07T21:07:31.290032978Z", - "State": "FAILED", - "Message": "execution failed", - "ContainerStatus": {} - }, - "DesiredState": "SHUTDOWN", - "NetworksAttachments": [ - { - "Network": { - "ID": "4qvuz4ko70xaltuqbt8956gd1", - "Version": { - "Index": 18 - }, - "CreatedAt": "2016-06-07T20:31:11.912919752Z", - "UpdatedAt": "2016-06-07T21:07:29.955277358Z", - "Spec": { - "Name": "ingress", - "Labels": { - "com.docker.swarm.internal": "true" - }, - "DriverConfiguration": {}, - "IPAM": { - "Driver": {}, - "Configs": [ - { - "Family": "UNKNOWN", - "Subnet": "10.255.0.0/16" - } - ] - } - }, - "DriverState": { - "Name": "overlay", - "Options": { - "com.docker.network.driver.overlay.vxlanid_list": "256" - } - }, - "IPAM": { - "Driver": { - "Name": "default" - }, - "Configs": [ - { - "Family": "UNKNOWN", - "Subnet": "10.255.0.0/16" - } - ] - } - }, - "Addresses": [ - "10.255.0.10/16" - ] - } - ], - "Endpoint": { - "Spec": {}, - "ExposedPorts": [ - { - "Protocol": "tcp", - "Port": 6379, - "PublicPort": 30000 - } - ], - "VirtualIPs": [ - { - "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", - "Addr": "10.255.0.2/16" - }, - { - "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", - "Addr": "10.255.0.3/16" - } - ] - } - }, - { - "ID": "1yljwbmlr8er2waf8orvqpwms", - "Version": { - "Index": 30 - }, - "CreatedAt": "2016-06-07T21:07:30.019104782Z", - "UpdatedAt": "2016-06-07T21:07:30.231958098Z", - "Name": "hopeful_cori", - "Spec": { - "ContainerSpec": { - "Image": "redis" - }, - "Resources": { - "Limits": {}, - "Reservations": {} - }, - "RestartPolicy": { - "Condition": "ANY" - }, - "Placement": {} - }, - "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz", - "Instance": 1, - "NodeID": "24ifsmvkjbyhk", - "ServiceAnnotations": {}, - "Status": { - "Timestamp": "2016-06-07T21:07:30.202183143Z", - "State": "FAILED", - "Message": "execution failed", - "ContainerStatus": {} - }, - "DesiredState": "SHUTDOWN", - "NetworksAttachments": [ - { - "Network": { - "ID": "4qvuz4ko70xaltuqbt8956gd1", - "Version": { - "Index": 18 - }, - "CreatedAt": "2016-06-07T20:31:11.912919752Z", - "UpdatedAt": "2016-06-07T21:07:29.955277358Z", - "Spec": { - "Name": "ingress", - "Labels": { - "com.docker.swarm.internal": "true" - }, - "DriverConfiguration": {}, - "IPAM": { - "Driver": {}, - "Configs": [ - { - "Family": "UNKNOWN", - "Subnet": "10.255.0.0/16" - } - ] - } - }, - "DriverState": { - "Name": "overlay", - "Options": { - "com.docker.network.driver.overlay.vxlanid_list": "256" - } - }, - "IPAM": { - "Driver": { - "Name": "default" - }, - "Configs": [ - { - "Family": "UNKNOWN", - "Subnet": "10.255.0.0/16" - } - ] - } - }, - "Addresses": [ - "10.255.0.5/16" - ] - } - ], - "Endpoint": { - "Spec": {}, - "ExposedPorts": [ - { - "Protocol": "tcp", - "Port": 6379, - "PublicPort": 30000 - } - ], - "VirtualIPs": [ - { - 
"NetworkID": "4qvuz4ko70xaltuqbt8956gd1", - "Addr": "10.255.0.2/16" - }, - { - "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", - "Addr": "10.255.0.3/16" - } - ] - } - } -]` - var expected []swarm.Task - err := json.Unmarshal([]byte(jsonTasks), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: jsonTasks, status: http.StatusOK}) - tasks, err := client.ListTasks(ListTasksOptions{}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(tasks, expected) { - t.Errorf("ListTasks: Expected %#v. Got %#v.", expected, tasks) - } - -} - -func TestInspectTask(t *testing.T) { - t.Parallel() - jsonTask := `{ - "ID": "0kzzo1i0y4jz6027t0k7aezc7", - "Version": { - "Index": 71 - }, - "CreatedAt": "2016-06-07T21:07:31.171892745Z", - "UpdatedAt": "2016-06-07T21:07:31.376370513Z", - "Name": "hopeful_cori", - "Spec": { - "ContainerSpec": { - "Image": "redis" - }, - "Resources": { - "Limits": {}, - "Reservations": {} - }, - "RestartPolicy": { - "Condition": "ANY" - }, - "Placement": {} - }, - "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz", - "Instance": 1, - "NodeID": "24ifsmvkjbyhk", - "ServiceAnnotations": {}, - "Status": { - "Timestamp": "2016-06-07T21:07:31.290032978Z", - "State": "FAILED", - "Message": "execution failed", - "ContainerStatus": {} - }, - "DesiredState": "SHUTDOWN", - "NetworksAttachments": [ - { - "Network": { - "ID": "4qvuz4ko70xaltuqbt8956gd1", - "Version": { - "Index": 18 - }, - "CreatedAt": "2016-06-07T20:31:11.912919752Z", - "UpdatedAt": "2016-06-07T21:07:29.955277358Z", - "Spec": { - "Name": "ingress", - "Labels": { - "com.docker.swarm.internal": "true" - }, - "DriverConfiguration": {}, - "IPAM": { - "Driver": {}, - "Configs": [ - { - "Family": "UNKNOWN", - "Subnet": "10.255.0.0/16" - } - ] - } - }, - "DriverState": { - "Name": "overlay", - "Options": { - "com.docker.network.driver.overlay.vxlanid_list": "256" - } - }, - "IPAM": { - "Driver": { - "Name": "default" - }, - "Configs": [ - { - "Family": "UNKNOWN", - "Subnet": "10.255.0.0/16" - } - ] - } - }, - "Addresses": [ - "10.255.0.10/16" - ] - } - ], - "Endpoint": { - "Spec": {}, - "ExposedPorts": [ - { - "Protocol": "tcp", - "Port": 6379, - "PublicPort": 30000 - } - ], - "VirtualIPs": [ - { - "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", - "Addr": "10.255.0.2/16" - }, - { - "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", - "Addr": "10.255.0.3/16" - } - ] - } -}` - - var expected swarm.Task - err := json.Unmarshal([]byte(jsonTask), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonTask, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "0kzzo1i0y4jz6027t0k7aezc7" - task, err := client.InspectTask(id) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*task, expected) { - t.Errorf("InspectTask(%q): Expected %#v. Got %#v.", id, expected, task) - } - expectedURL, _ := url.Parse(client.getURL("/tasks/0kzzo1i0y4jz6027t0k7aezc7")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - t.Errorf("InspectTask(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } - -} - -func TestInspectTaskNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such task", status: http.StatusNotFound}) - task, err := client.InspectTask("notfound") - if task != nil { - t.Errorf("InspectTask: Expected task, got %#v", task) - } - expected := &NoSuchTask{ID: "notfound"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("InspectTask: Wrong error returned. Want %#v. 
Got %#v.", expected, err) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_test.go b/vendor/github.com/fsouza/go-dockerclient/swarm_test.go deleted file mode 100644 index 4023541b..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/swarm_test.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2016 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "net/http" - "net/url" - "reflect" - "testing" - - "github.com/docker/docker/api/types/swarm" -) - -func TestInitSwarm(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: `"body"`, status: http.StatusOK} - client := newTestClient(fakeRT) - response, err := client.InitSwarm(InitSwarmOptions{}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedMethod := "POST" - if req.Method != expectedMethod { - t.Errorf("InitSwarm: Wrong HTTP method. Want %s. Got %s.", expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/swarm/init")) - if req.URL.Path != u.Path { - t.Errorf("InitSwarm: Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) - } - expected := "body" - if response != expected { - t.Errorf("InitSwarm: Wrong response. Want %q. Got %q.", expected, response) - } -} - -func TestInitSwarmAlreadyInSwarm(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "", status: http.StatusNotAcceptable}) - _, err := client.InitSwarm(InitSwarmOptions{}) - if err != ErrNodeAlreadyInSwarm { - t.Errorf("InitSwarm: Wrong error type. Want %#v. Got %#v", ErrNodeAlreadyInSwarm, err) - } - client = newTestClient(&FakeRoundTripper{message: "", status: http.StatusServiceUnavailable}) - _, err = client.InitSwarm(InitSwarmOptions{}) - if err != ErrNodeAlreadyInSwarm { - t.Errorf("InitSwarm: Wrong error type. Want %#v. Got %#v", ErrNodeAlreadyInSwarm, err) - } -} - -func TestJoinSwarm(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - err := client.JoinSwarm(JoinSwarmOptions{}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedMethod := "POST" - if req.Method != expectedMethod { - t.Errorf("JoinSwarm: Wrong HTTP method. Want %s. Got %s.", expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/swarm/join")) - if req.URL.Path != u.Path { - t.Errorf("JoinSwarm: Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) - } -} - -func TestJoinSwarmAlreadyInSwarm(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "", status: http.StatusNotAcceptable}) - err := client.JoinSwarm(JoinSwarmOptions{}) - if err != ErrNodeAlreadyInSwarm { - t.Errorf("JoinSwarm: Wrong error type. Want %#v. Got %#v", ErrNodeAlreadyInSwarm, err) - } - client = newTestClient(&FakeRoundTripper{message: "", status: http.StatusServiceUnavailable}) - err = client.JoinSwarm(JoinSwarmOptions{}) - if err != ErrNodeAlreadyInSwarm { - t.Errorf("JoinSwarm: Wrong error type. Want %#v. 
Got %#v", ErrNodeAlreadyInSwarm, err) - } -} - -func TestLeaveSwarm(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var testData = []struct { - force bool - expectedURI string - }{ - {false, "/swarm/leave?force=false"}, - {true, "/swarm/leave?force=true"}, - } - for i, tt := range testData { - err := client.LeaveSwarm(LeaveSwarmOptions{Force: tt.force}) - if err != nil { - t.Fatal(err) - } - expectedMethod := "POST" - req := fakeRT.requests[i] - if req.Method != expectedMethod { - t.Errorf("LeaveSwarm: Wrong HTTP method. Want %s. Got %s.", expectedMethod, req.Method) - } - expected, _ := url.Parse(client.getURL(tt.expectedURI)) - if req.URL.String() != expected.String() { - t.Errorf("LeaveSwarm: Wrong request string. Want %q. Got %q.", expected.String(), req.URL.String()) - } - } -} - -func TestLeaveSwarmNotInSwarm(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "", status: http.StatusNotAcceptable}) - err := client.LeaveSwarm(LeaveSwarmOptions{}) - if err != ErrNodeNotInSwarm { - t.Errorf("LeaveSwarm: Wrong error type. Want %#v. Got %#v", ErrNodeNotInSwarm, err) - } - client = newTestClient(&FakeRoundTripper{message: "", status: http.StatusServiceUnavailable}) - err = client.LeaveSwarm(LeaveSwarmOptions{}) - if err != ErrNodeNotInSwarm { - t.Errorf("LeaveSwarm: Wrong error type. Want %#v. Got %#v", ErrNodeNotInSwarm, err) - } -} - -func TestUpdateSwarm(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := UpdateSwarmOptions{ - Version: 10, - RotateManagerToken: true, - RotateWorkerToken: false, - } - err := client.UpdateSwarm(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedMethod := "POST" - if req.Method != expectedMethod { - t.Errorf("UpdateSwarm: Wrong HTTP method. Want %s. Got %s.", expectedMethod, req.Method) - } - expectedPath := "/swarm/update" - if req.URL.Path != expectedPath { - t.Errorf("UpdateSwarm: Wrong request path. Want %q. Got %q.", expectedPath, req.URL.Path) - } - expected := map[string][]string{ - "version": {"10"}, - "rotateManagerToken": {"true"}, - "rotateWorkerToken": {"false"}, - } - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("UpdateSwarm: Wrong request query. Want %v. Got %v", expected, got) - } -} - -func TestUpdateSwarmNotInSwarm(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "", status: http.StatusNotAcceptable}) - err := client.UpdateSwarm(UpdateSwarmOptions{}) - if err != ErrNodeNotInSwarm { - t.Errorf("UpdateSwarm: Wrong error type. Want %#v. Got %#v", ErrNodeNotInSwarm, err) - } - client = newTestClient(&FakeRoundTripper{message: "", status: http.StatusServiceUnavailable}) - err = client.UpdateSwarm(UpdateSwarmOptions{}) - if err != ErrNodeNotInSwarm { - t.Errorf("UpdateSwarm: Wrong error type. Want %#v. Got %#v", ErrNodeNotInSwarm, err) - } -} - -func TestInspectSwarm(t *testing.T) { - t.Parallel() - fakeRT := &FakeRoundTripper{message: `{"ID": "123"}`, status: http.StatusOK} - client := newTestClient(fakeRT) - response, err := client.InspectSwarm(nil) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedMethod := "GET" - if req.Method != expectedMethod { - t.Errorf("InspectSwarm: Wrong HTTP method. Want %s. 
Got %s.", expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/swarm")) - if req.URL.Path != u.Path { - t.Errorf("InspectSwarm: Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) - } - expected := swarm.Swarm{ClusterInfo: swarm.ClusterInfo{ID: "123"}} - if !reflect.DeepEqual(expected, response) { - t.Errorf("InspectSwarm: Wrong response. Want %#v. Got %#v.", expected, response) - } -} - -func TestInspectSwarmNotInSwarm(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "", status: http.StatusNotAcceptable}) - _, err := client.InspectSwarm(nil) - if err != ErrNodeNotInSwarm { - t.Errorf("InspectSwarm: Wrong error type. Want %#v. Got %#v", ErrNodeNotInSwarm, err) - } - client = newTestClient(&FakeRoundTripper{message: "", status: http.StatusServiceUnavailable}) - _, err = client.InspectSwarm(nil) - if err != ErrNodeNotInSwarm { - t.Errorf("InspectSwarm: Wrong error type. Want %#v. Got %#v", ErrNodeNotInSwarm, err) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/tar.go b/vendor/github.com/fsouza/go-dockerclient/tar.go index 9716a771..611da8c9 100644 --- a/vendor/github.com/fsouza/go-dockerclient/tar.go +++ b/vendor/github.com/fsouza/go-dockerclient/tar.go @@ -13,8 +13,8 @@ import ( "path/filepath" "strings" - "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/fileutils" + "github.com/fsouza/go-dockerclient/internal/archive" ) func createTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) { diff --git a/vendor/github.com/fsouza/go-dockerclient/tls.go b/vendor/github.com/fsouza/go-dockerclient/tls.go index bb5790b5..5f0e2e31 100644 --- a/vendor/github.com/fsouza/go-dockerclient/tls.go +++ b/vendor/github.com/fsouza/go-dockerclient/tls.go @@ -109,10 +109,10 @@ func copyTLSConfig(cfg *tls.Config) *tls.Config { NameToCertificate: cfg.NameToCertificate, NextProtos: cfg.NextProtos, PreferServerCipherSuites: cfg.PreferServerCipherSuites, - Rand: cfg.Rand, - RootCAs: cfg.RootCAs, - ServerName: cfg.ServerName, - SessionTicketKey: cfg.SessionTicketKey, - SessionTicketsDisabled: cfg.SessionTicketsDisabled, + Rand: cfg.Rand, + RootCAs: cfg.RootCAs, + ServerName: cfg.ServerName, + SessionTicketKey: cfg.SessionTicketKey, + SessionTicketsDisabled: cfg.SessionTicketsDisabled, } } diff --git a/vendor/github.com/fsouza/go-dockerclient/volume.go b/vendor/github.com/fsouza/go-dockerclient/volume.go index 021a262b..c8f50469 100644 --- a/vendor/github.com/fsouza/go-dockerclient/volume.go +++ b/vendor/github.com/fsouza/go-dockerclient/volume.go @@ -9,6 +9,7 @@ import ( "encoding/json" "errors" "net/http" + "time" ) var ( @@ -28,6 +29,7 @@ type Volume struct { Mountpoint string `json:"Mountpoint,omitempty" yaml:"Mountpoint,omitempty" toml:"Mountpoint,omitempty"` Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"` Options map[string]string `json:"Options,omitempty" yaml:"Options,omitempty" toml:"Options,omitempty"` + CreatedAt time.Time `json:"CreatedAt,omitempty" yaml:"CreatedAt,omitempty" toml:"CreatedAt,omitempty"` } // ListVolumesOptions specify parameters to the ListVolumes function. diff --git a/vendor/github.com/fsouza/go-dockerclient/volume_test.go b/vendor/github.com/fsouza/go-dockerclient/volume_test.go deleted file mode 100644 index d30ae181..00000000 --- a/vendor/github.com/fsouza/go-dockerclient/volume_test.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "net/http" - "net/url" - "reflect" - "testing" -) - -func TestListVolumes(t *testing.T) { - t.Parallel() - volumesData := `[ - { - "Name": "tardis", - "Driver": "local", - "Mountpoint": "/var/lib/docker/volumes/tardis" - }, - { - "Name": "foo", - "Driver": "bar", - "Mountpoint": "/var/lib/docker/volumes/bar" - } -]` - body := `{ "Volumes": ` + volumesData + ` }` - var expected []Volume - if err := json.Unmarshal([]byte(volumesData), &expected); err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) - volumes, err := client.ListVolumes(ListVolumesOptions{}) - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(volumes, expected) { - t.Errorf("ListVolumes: Wrong return value. Want %#v. Got %#v.", expected, volumes) - } -} - -func TestCreateVolume(t *testing.T) { - t.Parallel() - body := `{ - "Name": "tardis", - "Driver": "local", - "Mountpoint": "/var/lib/docker/volumes/tardis" - }` - var expected Volume - if err := json.Unmarshal([]byte(body), &expected); err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: body, status: http.StatusOK} - client := newTestClient(fakeRT) - volume, err := client.CreateVolume( - CreateVolumeOptions{ - Name: "tardis", - Driver: "local", - DriverOpts: map[string]string{ - "foo": "bar", - }, - }, - ) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(volume, &expected) { - t.Errorf("CreateVolume: Wrong return value. Want %#v. Got %#v.", expected, volume) - } - req := fakeRT.requests[0] - expectedMethod := "POST" - if req.Method != expectedMethod { - t.Errorf("CreateVolume(): Wrong HTTP method. Want %s. Got %s.", expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/volumes/create")) - if req.URL.Path != u.Path { - t.Errorf("CreateVolume(): Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) - } -} - -func TestInspectVolume(t *testing.T) { - t.Parallel() - body := `{ - "Name": "tardis", - "Driver": "local", - "Mountpoint": "/var/lib/docker/volumes/tardis", - "Options": { - "foo": "bar" - } - }` - var expected Volume - if err := json.Unmarshal([]byte(body), &expected); err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: body, status: http.StatusOK} - client := newTestClient(fakeRT) - name := "tardis" - volume, err := client.InspectVolume(name) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(volume, &expected) { - t.Errorf("InspectVolume: Wrong return value. Want %#v. Got %#v.", expected, volume) - } - req := fakeRT.requests[0] - expectedMethod := "GET" - if req.Method != expectedMethod { - t.Errorf("InspectVolume(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/volumes/" + name)) - if req.URL.Path != u.Path { - t.Errorf("CreateVolume(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path) - } -} - -func TestRemoveVolume(t *testing.T) { - t.Parallel() - name := "test" - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - if err := client.RemoveVolume(name); err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedMethod := "DELETE" - if req.Method != expectedMethod { - t.Errorf("RemoveVolume(%q): Wrong HTTP method. Want %s. 
Got %s.", name, expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/volumes/" + name)) - if req.URL.Path != u.Path { - t.Errorf("RemoveVolume(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path) - } -} - -func TestRemoveVolumeWithOptions(t *testing.T) { - t.Parallel() - name := "test" - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - if err := client.RemoveVolumeWithOptions(RemoveVolumeOptions{ - Name: name, - Force: true, - }); err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedMethod := "DELETE" - if req.Method != expectedMethod { - t.Errorf("RemoveVolume(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/volumes/" + name + "?force=1")) - if req.URL.RequestURI() != u.RequestURI() { - t.Errorf("RemoveVolume(%q): Wrong request path. Want %q. Got %q.", name, u.RequestURI(), req.URL.RequestURI()) - } -} - -func TestRemoveVolumeNotFound(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "no such volume", status: http.StatusNotFound}) - if err := client.RemoveVolume("test:"); err != ErrNoSuchVolume { - t.Errorf("RemoveVolume: wrong error. Want %#v. Got %#v.", ErrNoSuchVolume, err) - } -} - -func TestRemoveVolumeInternalError(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "something went wrong", status: http.StatusInternalServerError}) - if err := client.RemoveVolume("test:test"); err == nil { - t.Error("RemoveVolume: unexpected error") - } -} - -func TestRemoveVolumeInUse(t *testing.T) { - t.Parallel() - client := newTestClient(&FakeRoundTripper{message: "volume in use and cannot be removed", status: http.StatusConflict}) - if err := client.RemoveVolume("test:"); err != ErrVolumeInUse { - t.Errorf("RemoveVolume: wrong error. Want %#v. Got %#v.", ErrVolumeInUse, err) - } -} - -func TestPruneVolumes(t *testing.T) { - t.Parallel() - results := `{ - "VolumesDeleted": [ - "a", "b", "c" - ], - "SpaceReclaimed": 123 - }` - - expected := &PruneVolumesResults{} - err := json.Unmarshal([]byte(results), expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: results, status: http.StatusOK}) - got, err := client.PruneVolumes(PruneVolumesOptions{}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, expected) { - t.Errorf("PruneContainers: Expected %#v. 
Got %#v.", expected, got) - } -} diff --git a/vendor/github.com/gogo/protobuf/.gitignore b/vendor/github.com/gogo/protobuf/.gitignore deleted file mode 100644 index 76009479..00000000 --- a/vendor/github.com/gogo/protobuf/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -._* -*.js -*.js.map diff --git a/vendor/github.com/gogo/protobuf/.mailmap b/vendor/github.com/gogo/protobuf/.mailmap deleted file mode 100644 index bc001021..00000000 --- a/vendor/github.com/gogo/protobuf/.mailmap +++ /dev/null @@ -1,8 +0,0 @@ -Walter Schulze Walter Schulze -Walter Schulze -Walter Schulze awalterschulze -Walter Schulze awalterschulze@gmail.com -John Tuley -Anton Povarov -Denis Smirnov dennwc -DongYun Kang \ No newline at end of file diff --git a/vendor/github.com/gogo/protobuf/.travis.yml b/vendor/github.com/gogo/protobuf/.travis.yml deleted file mode 100644 index 12302e00..00000000 --- a/vendor/github.com/gogo/protobuf/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -env: - - PROTOBUF_VERSION=2.6.1 - - PROTOBUF_VERSION=3.0.2 - - PROTOBUF_VERSION=3.5.1 - -before_install: - - ./install-protobuf.sh - - PATH=/home/travis/bin:$PATH protoc --version - -script: - - PATH=/home/travis/bin:$PATH make buildserverall - - echo $TRAVIS_GO_VERSION - - if [[ "$PROTOBUF_VERSION" == "3.5.1" ]] && [[ "$TRAVIS_GO_VERSION" =~ ^1\.9\.[0-9]+$ ]]; then ! git status --porcelain | read || (git status; git diff; exit 1); fi - -language: go - -go: - - 1.8.x - - 1.9.x - - 1.10beta1 diff --git a/vendor/github.com/gogo/protobuf/Makefile b/vendor/github.com/gogo/protobuf/Makefile deleted file mode 100644 index 1d9ad1f5..00000000 --- a/vendor/github.com/gogo/protobuf/Makefile +++ /dev/null @@ -1,162 +0,0 @@ -# Protocol Buffers for Go with Gadgets -# -# Copyright (c) 2013, The GoGo Authors. All rights reserved. -# http://github.com/gogo/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -GO_VERSION:=$(shell go version) - -.PHONY: nuke regenerate tests clean install gofmt vet contributors - -all: clean install regenerate install tests errcheck vet - -buildserverall: clean install regenerate install tests vet js - -install: - go install ./proto - go install ./gogoproto - go install ./jsonpb - go install ./protoc-gen-gogo - go install ./protoc-gen-gofast - go install ./protoc-gen-gogofast - go install ./protoc-gen-gogofaster - go install ./protoc-gen-gogoslick - go install ./protoc-gen-gostring - go install ./protoc-min-version - go install ./protoc-gen-combo - go install ./gogoreplace - -clean: - go clean ./... - -nuke: - go clean -i ./... - -gofmt: - gofmt -l -s -w . - -regenerate: - make -C protoc-gen-gogo/descriptor regenerate - make -C protoc-gen-gogo/plugin regenerate - make -C protoc-gen-gogo/testdata regenerate - make -C gogoproto regenerate - make -C proto/testdata regenerate - make -C jsonpb/jsonpb_test_proto regenerate - make -C _conformance regenerate - make -C types regenerate - make -C test regenerate - make -C test/example regenerate - make -C test/unrecognized regenerate - make -C test/group regenerate - make -C test/unrecognizedgroup regenerate - make -C test/enumstringer regenerate - make -C test/unmarshalmerge regenerate - make -C test/moredefaults regenerate - make -C test/issue8 regenerate - make -C test/enumprefix regenerate - make -C test/enumcustomname regenerate - make -C test/packed regenerate - make -C test/protosize regenerate - make -C test/tags regenerate - make -C test/oneof regenerate - make -C test/oneof3 regenerate - make -C test/theproto3 regenerate - make -C test/mapdefaults regenerate - make -C test/mapsproto2 regenerate - make -C test/issue42order regenerate - make -C proto generate-test-pbs - make -C test/importdedup regenerate - make -C test/importduplicate regenerate - make -C test/custombytesnonstruct regenerate - make -C test/required regenerate - make -C test/casttype regenerate - make -C test/castvalue regenerate - make -C vanity/test regenerate - make -C test/sizeunderscore regenerate - make -C test/issue34 regenerate - make -C test/empty-issue70 regenerate - make -C test/indeximport-issue72 regenerate - make -C test/fuzztests regenerate - make -C test/oneofembed regenerate - make -C test/asymetric-issue125 regenerate - make -C test/filedotname regenerate - make -C test/nopackage regenerate - make -C test/types regenerate - make -C test/proto3extension regenerate - make -C test/stdtypes regenerate - make -C test/data regenerate - make -C test/typedecl regenerate - make -C test/issue260 regenerate - make -C test/issue261 regenerate - make -C test/issue262 regenerate - make -C test/issue312 regenerate - make -C test/enumdecl regenerate - make -C test/typedecl_all regenerate - make -C test/enumdecl_all regenerate - make -C test/int64support regenerate - make -C test/issue322 regenerate - make -C test/issue330 regenerate - make gofmt - -tests: - go build ./test/enumprefix - go test ./... - (cd test/stdtypes && make test) - -vet: - go vet ./... - go tool vet --shadow . - -errcheck: - go get github.com/kisielk/errcheck - errcheck ./test/... - -drone: - sudo apt-get install protobuf-compiler - (cd $(GOPATH)/src/github.com/gogo/protobuf && make buildserverall) - -testall: - go get -u github.com/golang/protobuf/proto - make -C protoc-gen-gogo/testdata test - make -C vanity/test test - make -C test/registration test - make tests - -bench: - go get golang.org/x/tools/cmd/benchcmp - (cd test/mixbench && go build .) 
- ./test/mixbench/mixbench - -contributors: - git log --format='%aN <%aE>' | sort -fu > CONTRIBUTORS - -js: -ifeq (go1.9, $(findstring go1.9, $(GO_VERSION))) - go get -u github.com/gopherjs/gopherjs - gopherjs build github.com/gogo/protobuf/protoc-gen-gogo -endif - -update: - (cd protobuf && make update) diff --git a/vendor/github.com/gogo/protobuf/README b/vendor/github.com/gogo/protobuf/README deleted file mode 100644 index 035426df..00000000 --- a/vendor/github.com/gogo/protobuf/README +++ /dev/null @@ -1,258 +0,0 @@ -GoGoProtobuf http://github.com/gogo/protobuf extends -GoProtobuf http://github.com/golang/protobuf - -# Go support for Protocol Buffers - -Google's data interchange format. -Copyright 2010 The Go Authors. -https://github.com/golang/protobuf - -This package and the code it generates requires at least Go 1.4. - -This software implements Go bindings for protocol buffers. For -information about protocol buffers themselves, see - https://developers.google.com/protocol-buffers/ - -## Installation ## - -To use this software, you must: -- Install the standard C++ implementation of protocol buffers from - https://developers.google.com/protocol-buffers/ -- Of course, install the Go compiler and tools from - https://golang.org/ - See - https://golang.org/doc/install - for details or, if you are using gccgo, follow the instructions at - https://golang.org/doc/install/gccgo -- Grab the code from the repository and install the proto package. - The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`. - The compiler plugin, protoc-gen-go, will be installed in $GOBIN, - defaulting to $GOPATH/bin. It must be in your $PATH for the protocol - compiler, protoc, to find it. - -This software has two parts: a 'protocol compiler plugin' that -generates Go source files that, once compiled, can access and manage -protocol buffers; and a library that implements run-time support for -encoding (marshaling), decoding (unmarshaling), and accessing protocol -buffers. - -There is support for gRPC in Go using protocol buffers. -See the note at the bottom of this file for details. - -There are no insertion points in the plugin. - -GoGoProtobuf provides extensions for protocol buffers and GoProtobuf -see http://github.com/gogo/protobuf/gogoproto/doc.go - -## Using protocol buffers with Go ## - -Once the software is installed, there are two steps to using it. -First you must compile the protocol buffer definitions and then import -them, with the support library, into your program. - -To compile the protocol buffer definition, run protoc with the --gogo_out -parameter set to the directory you want to output the Go code to. - - protoc --gogo_out=. *.proto - -The generated files will be suffixed .pb.go. See the Test code below -for an example using such a file. - -The package comment for the proto library contains text describing -the interface provided in Go for protocol buffers. Here is an edited -version. - -If you are using any gogo.proto extensions you will need to specify the -proto_path to include the descriptor.proto and gogo.proto. -gogo.proto is located in github.com/gogo/protobuf/gogoproto -This should be fine, since your import is the same. -descriptor.proto is located in either github.com/gogo/protobuf/protobuf -or code.google.com/p/protobuf/trunk/src/ -Its import is google/protobuf/descriptor.proto so it might need some help. - - protoc --gogo_out=. 
-I=.:github.com/gogo/protobuf/protobuf *.proto - -========== - -The proto package converts data structures to and from the -wire format of protocol buffers. It works in concert with the -Go source code generated for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - Helpers for getting values are superseded by the - GetFoo methods and their use is deprecated. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed with the enum's type name. Enum types have - a String method, and a Enum method to assist in message construction. - - Nested groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -Consider file test.proto, containing - -```proto - syntax = "proto2"; - package example; - - enum FOO { X = 17; }; - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - } -``` - -To create and play with a Test object from the example package, - -```go - package main - - import ( - "log" - - "github.com/gogo/protobuf/proto" - "path/to/example" - ) - - func main() { - test := &example.Test { - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &example.Test_OptionalGroup { - RequiredField: proto.String("good bye"), - }, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &example.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. 
- if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // etc. - } -``` - - -## Parameters ## - -To pass extra parameters to the plugin, use a comma-separated -parameter list separated from the output directory by a colon: - - - protoc --gogo_out=plugins=grpc,import_path=mypackage:. *.proto - - -- `import_prefix=xxx` - a prefix that is added onto the beginning of - all imports. Useful for things like generating protos in a - subdirectory, or regenerating vendored protobufs in-place. -- `import_path=foo/bar` - used as the package if no input files - declare `go_package`. If it contains slashes, everything up to the - rightmost slash is ignored. -- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to - load. The only plugin in this repo is `grpc`. -- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is - associated with Go package quux/shme. This is subject to the - import_prefix parameter. - -## gRPC Support ## - -If a proto file specifies RPC services, protoc-gen-go can be instructed to -generate code compatible with gRPC (http://www.grpc.io/). To do this, pass -the `plugins` parameter to protoc-gen-go; the usual way is to insert it into -the --go_out argument to protoc: - - protoc --gogo_out=plugins=grpc:. *.proto - -## Compatibility ## - -The library and the generated code are expected to be stable over time. -However, we reserve the right to make breaking changes without notice for the -following reasons: - -- Security. A security issue in the specification or implementation may come to - light whose resolution requires breaking compatibility. We reserve the right - to address such security issues. -- Unspecified behavior. There are some aspects of the Protocol Buffers - specification that are undefined. Programs that depend on such unspecified - behavior may break in future releases. -- Specification errors or changes. If it becomes necessary to address an - inconsistency, incompleteness, or change in the Protocol Buffers - specification, resolving the issue could affect the meaning or legality of - existing programs. We reserve the right to address such issues, including - updating the implementations. -- Bugs. If the library has a bug that violates the specification, a program - that depends on the buggy behavior may break if the bug is fixed. We reserve - the right to fix such bugs. -- Adding methods or fields to generated structs. These may conflict with field - names that already exist in a schema, causing applications to break. When the - code generator encounters a field in the schema that would collide with a - generated field or method name, the code generator will append an underscore - to the generated field or method name. -- Adding, removing, or changing methods or fields in generated structs that - start with `XXX`. These parts of the generated code are exported out of - necessity, but should not be considered part of the public API. -- Adding, removing, or changing unexported symbols in generated code. - -Any breaking changes outside of these will be announced 6 months in advance to -protobuf@googlegroups.com. - -You should, whenever possible, use generated code created by the `protoc-gen-go` -tool built at the same commit as the `proto` package. The `proto` package -declares package-level constants in the form `ProtoPackageIsVersionX`. 
-Application code and generated code may depend on one of these constants to -ensure that compilation will fail if the available version of the proto library -is too old. Whenever we make a change to the generated code that requires newer -library support, in the same commit we will increment the version number of the -generated code and declare a new package-level constant whose name incorporates -the latest version number. Removing a compatibility constant is considered a -breaking change and would be subject to the announcement policy stated above. - -## Plugins ## - -The `protoc-gen-go/generator` package exposes a plugin interface, -which is used by the gRPC code generation. This interface is not -supported and is subject to incompatible changes without notice. diff --git a/vendor/github.com/gogo/protobuf/Readme.md b/vendor/github.com/gogo/protobuf/Readme.md deleted file mode 100644 index a4ad3eec..00000000 --- a/vendor/github.com/gogo/protobuf/Readme.md +++ /dev/null @@ -1,139 +0,0 @@ -# Protocol Buffers for Go with Gadgets - -[![Build Status](https://travis-ci.org/gogo/protobuf.svg?branch=master)](https://travis-ci.org/gogo/protobuf) - -gogoprotobuf is a fork of golang/protobuf with extra code generation features. - -This code generation is used to achieve: - - - fast marshalling and unmarshalling - - more canonical Go structures - - goprotobuf compatibility - - less typing by optionally generating extra helper code - - peace of mind by optionally generating test and benchmark code - - other serialization formats - -Keeping track of how up to date gogoprotobuf is relative to golang/protobuf is done in this -issue - -## Users - -These projects use gogoprotobuf: - - - etcd - blog - sample proto file - - spacemonkey - blog - - badoo - sample proto file - - mesos-go - sample proto file - - heka - the switch from golang/protobuf to gogo/protobuf when it was still on code.google.com - - cockroachdb - sample proto file - - go-ipfs - sample proto file - - rkive-go - sample proto file - - dropbox - - srclib - sample proto file - - adyoulike - - cloudfoundry - sample proto file - - kubernetes - go2idl built on top of gogoprotobuf - - dgraph - release notes - benchmarks - - centrifugo - release notes - blog - - docker swarmkit - sample proto file - - nats.io - go-nats-streaming - - tidb - Communication between tidb and tikv - - protoactor-go - vanity command that also generates actors from service definitions - - containerd - vanity command with custom field names that conforms to the golang convention. - - nakama - - proteus - - carbonzipper stack - - sendgrid - - zero-os/0-stor - -Please let us know if you are using gogoprotobuf by posting on our GoogleGroup. - -### Mentioned - - - Cloudflare - go serialization talk - Albert Strasheim - - GopherCon 2014 Writing High Performance Databases in Go by Ben Johnson - - alecthomas' go serialization benchmarks - -## Getting Started - -There are several ways to use gogoprotobuf, but for all you need to install go and protoc. -After that you can choose: - - - Speed - - More Speed and more generated code - - Most Speed and most customization - -### Installation - -To install it, you must first have Go (at least version 1.6.3) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). Latest patch versions of Go 1.8, 1.9 and 1.10 are continuously tested. - -Next, install the standard protocol buffer implementation from [https://github.com/google/protobuf](https://github.com/google/protobuf). 
-Most versions from 2.3.1 should not give any problems, but 2.6.1, 3.0.2 and 3.5.1 are continuously tested. - -### Speed - -Install the protoc-gen-gofast binary - - go get github.com/gogo/protobuf/protoc-gen-gofast - -Use it to generate faster marshaling and unmarshaling go code for your protocol buffers. - - protoc --gofast_out=. myproto.proto - -This does not allow you to use any of the other gogoprotobuf [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md). - -### More Speed and more generated code - -Fields without pointers cause less time in the garbage collector. -More code generation results in more convenient methods. - -Other binaries are also included: - - protoc-gen-gogofast (same as gofast, but imports gogoprotobuf) - protoc-gen-gogofaster (same as gogofast, without XXX_unrecognized, less pointer fields) - protoc-gen-gogoslick (same as gogofaster, but with generated string, gostring and equal methods) - -Installing any of these binaries is easy. Simply run: - - go get github.com/gogo/protobuf/proto - go get github.com/gogo/protobuf/{binary} - go get github.com/gogo/protobuf/gogoproto - -These binaries allow you to use gogoprotobuf [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md). You can also use your own binary. - -To generate the code, you also need to set the include path properly. - - protoc -I=. -I=$GOPATH/src -I=$GOPATH/src/github.com/gogo/protobuf/protobuf --{binary}_out=. myproto.proto - -To use proto files from "google/protobuf" you need to add additional args to protoc. - - protoc -I=. -I=$GOPATH/src -I=$GOPATH/src/github.com/gogo/protobuf/protobuf --{binary}_out=\ - Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types,\ - Mgoogle/protobuf/duration.proto=github.com/gogo/protobuf/types,\ - Mgoogle/protobuf/struct.proto=github.com/gogo/protobuf/types,\ - Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,\ - Mgoogle/protobuf/wrappers.proto=github.com/gogo/protobuf/types:. \ - myproto.proto - -Note that in the protoc command, {binary} does not contain the initial prefix of "protoc-gen". - -### Most Speed and most customization - -Customizing the fields of the messages to be the fields that you actually want to use removes the need to copy between the structs you use and structs you use to serialize. -gogoprotobuf also offers more serialization formats and generation of tests and even more methods. - -Please visit the [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md) page for more documentation. - -Install protoc-gen-gogo: - - go get github.com/gogo/protobuf/proto - go get github.com/gogo/protobuf/jsonpb - go get github.com/gogo/protobuf/protoc-gen-gogo - go get github.com/gogo/protobuf/gogoproto - -## GRPC - -It works the same as golang/protobuf, simply specify the plugin. -Here is an example using gofast: - - protoc --gofast_out=plugins=grpc:. 
my.proto diff --git a/vendor/github.com/gogo/protobuf/bench.md b/vendor/github.com/gogo/protobuf/bench.md deleted file mode 100644 index 16da66ad..00000000 --- a/vendor/github.com/gogo/protobuf/bench.md +++ /dev/null @@ -1,190 +0,0 @@ -# Benchmarks - -## How to reproduce - -For a comparison run: - - make bench - -followed by [benchcmp](http://code.google.com/p/go/source/browse/misc/benchcmp benchcmp) on the resulting files: - - $GOROOT/misc/benchcmp $GOPATH/src/github.com/gogo/protobuf/test/mixbench/marshal.txt $GOPATH/src/github.com/gogo/protobuf/test/mixbench/marshaler.txt - $GOROOT/misc/benchcmp $GOPATH/src/github.com/gogo/protobuf/test/mixbench/unmarshal.txt $GOPATH/src/github.com/gogo/protobuf/test/mixbench/unmarshaler.txt - -Benchmarks ran on Revision: 11c56be39364 - -June 2013 - -Processor 2,66 GHz Intel Core i7 - -Memory 8 GB 1067 MHz DDR3 - -## Marshaler - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| benchmark | old ns/op | new ns/op | delta |
| --- | --- | --- | --- |
| BenchmarkNidOptNativeProtoMarshal | 2656 | 889 | -66.53% |
| BenchmarkNinOptNativeProtoMarshal | 2651 | 1015 | -61.71% |
| BenchmarkNidRepNativeProtoMarshal | 42661 | 12519 | -70.65% |
| BenchmarkNinRepNativeProtoMarshal | 42306 | 12354 | -70.80% |
| BenchmarkNidRepPackedNativeProtoMarshal | 34148 | 11902 | -65.15% |
| BenchmarkNinRepPackedNativeProtoMarshal | 33375 | 11969 | -64.14% |
| BenchmarkNidOptStructProtoMarshal | 7148 | 3727 | -47.86% |
| BenchmarkNinOptStructProtoMarshal | 6956 | 3481 | -49.96% |
| BenchmarkNidRepStructProtoMarshal | 46551 | 19492 | -58.13% |
| BenchmarkNinRepStructProtoMarshal | 46715 | 19043 | -59.24% |
| BenchmarkNidEmbeddedStructProtoMarshal | 5231 | 2050 | -60.81% |
| BenchmarkNinEmbeddedStructProtoMarshal | 4665 | 2000 | -57.13% |
| BenchmarkNidNestedStructProtoMarshal | 181106 | 103604 | -42.79% |
| BenchmarkNinNestedStructProtoMarshal | 182053 | 102069 | -43.93% |
| BenchmarkNidOptCustomProtoMarshal | 1209 | 310 | -74.36% |
| BenchmarkNinOptCustomProtoMarshal | 1435 | 277 | -80.70% |
| BenchmarkNidRepCustomProtoMarshal | 4126 | 763 | -81.51% |
| BenchmarkNinRepCustomProtoMarshal | 3972 | 769 | -80.64% |
| BenchmarkNinOptNativeUnionProtoMarshal | 973 | 303 | -68.86% |
| BenchmarkNinOptStructUnionProtoMarshal | 1536 | 521 | -66.08% |
| BenchmarkNinEmbeddedStructUnionProtoMarshal | 2327 | 884 | -62.01% |
| BenchmarkNinNestedStructUnionProtoMarshal | 2070 | 743 | -64.11% |
| BenchmarkTreeProtoMarshal | 1554 | 838 | -46.07% |
| BenchmarkOrBranchProtoMarshal | 3156 | 2012 | -36.25% |
| BenchmarkAndBranchProtoMarshal | 3183 | 1996 | -37.29% |
| BenchmarkLeafProtoMarshal | 965 | 606 | -37.20% |
| BenchmarkDeepTreeProtoMarshal | 2316 | 1283 | -44.60% |
| BenchmarkADeepBranchProtoMarshal | 2719 | 1492 | -45.13% |
| BenchmarkAndDeepBranchProtoMarshal | 4663 | 2922 | -37.34% |
| BenchmarkDeepLeafProtoMarshal | 1849 | 1016 | -45.05% |
| BenchmarkNilProtoMarshal | 439 | 76 | -82.53% |
| BenchmarkNidOptEnumProtoMarshal | 514 | 152 | -70.43% |
| BenchmarkNinOptEnumProtoMarshal | 550 | 158 | -71.27% |
| BenchmarkNidRepEnumProtoMarshal | 647 | 207 | -68.01% |
| BenchmarkNinRepEnumProtoMarshal | 662 | 213 | -67.82% |
| BenchmarkTimerProtoMarshal | 934 | 271 | -70.99% |
| BenchmarkMyExtendableProtoMarshal | 608 | 185 | -69.57% |
| BenchmarkOtherExtenableProtoMarshal | 1112 | 332 | -70.14% |

| benchmark | old MB/s | new MB/s | speedup |
| --- | --- | --- | --- |
| BenchmarkNidOptNativeProtoMarshal | 126.86 | 378.86 | 2.99x |
| BenchmarkNinOptNativeProtoMarshal | 114.27 | 298.42 | 2.61x |
| BenchmarkNidRepNativeProtoMarshal | 164.25 | 561.20 | 3.42x |
| BenchmarkNinRepNativeProtoMarshal | 166.10 | 568.23 | 3.42x |
| BenchmarkNidRepPackedNativeProtoMarshal | 99.10 | 283.97 | 2.87x |
| BenchmarkNinRepPackedNativeProtoMarshal | 101.30 | 282.31 | 2.79x |
| BenchmarkNidOptStructProtoMarshal | 176.83 | 339.07 | 1.92x |
| BenchmarkNinOptStructProtoMarshal | 163.59 | 326.57 | 2.00x |
| BenchmarkNidRepStructProtoMarshal | 178.84 | 427.49 | 2.39x |
| BenchmarkNinRepStructProtoMarshal | 178.70 | 437.69 | 2.45x |
| BenchmarkNidEmbeddedStructProtoMarshal | 124.24 | 317.56 | 2.56x |
| BenchmarkNinEmbeddedStructProtoMarshal | 132.03 | 307.99 | 2.33x |
| BenchmarkNidNestedStructProtoMarshal | 192.91 | 337.86 | 1.75x |
| BenchmarkNinNestedStructProtoMarshal | 192.44 | 344.45 | 1.79x |
| BenchmarkNidOptCustomProtoMarshal | 29.77 | 116.03 | 3.90x |
| BenchmarkNinOptCustomProtoMarshal | 22.29 | 115.38 | 5.18x |
| BenchmarkNidRepCustomProtoMarshal | 35.14 | 189.80 | 5.40x |
| BenchmarkNinRepCustomProtoMarshal | 36.50 | 188.40 | 5.16x |
| BenchmarkNinOptNativeUnionProtoMarshal | 32.87 | 105.39 | 3.21x |
| BenchmarkNinOptStructUnionProtoMarshal | 66.40 | 195.76 | 2.95x |
| BenchmarkNinEmbeddedStructUnionProtoMarshal | 93.24 | 245.26 | 2.63x |
| BenchmarkNinNestedStructUnionProtoMarshal | 57.49 | 160.06 | 2.78x |
| BenchmarkTreeProtoMarshal | 137.64 | 255.12 | 1.85x |
| BenchmarkOrBranchProtoMarshal | 137.80 | 216.10 | 1.57x |
| BenchmarkAndBranchProtoMarshal | 136.64 | 217.89 | 1.59x |
| BenchmarkLeafProtoMarshal | 214.48 | 341.53 | 1.59x |
| BenchmarkDeepTreeProtoMarshal | 95.85 | 173.03 | 1.81x |
| BenchmarkADeepBranchProtoMarshal | 82.73 | 150.78 | 1.82x |
| BenchmarkAndDeepBranchProtoMarshal | 96.72 | 153.98 | 1.59x |
| BenchmarkDeepLeafProtoMarshal | 117.34 | 213.41 | 1.82x |
| BenchmarkNidOptEnumProtoMarshal | 3.89 | 13.16 | 3.38x |
| BenchmarkNinOptEnumProtoMarshal | 1.82 | 6.30 | 3.46x |
| BenchmarkNidRepEnumProtoMarshal | 12.36 | 38.50 | 3.11x |
| BenchmarkNinRepEnumProtoMarshal | 12.08 | 37.53 | 3.11x |
| BenchmarkTimerProtoMarshal | 73.81 | 253.87 | 3.44x |
| BenchmarkMyExtendableProtoMarshal | 13.15 | 43.08 | 3.28x |
| BenchmarkOtherExtenableProtoMarshal | 24.28 | 81.09 | 3.34x |
## Unmarshaler
| benchmark | old ns/op | new ns/op | delta |
| --- | --- | --- | --- |
| BenchmarkNidOptNativeProtoUnmarshal | 2521 | 1006 | -60.10% |
| BenchmarkNinOptNativeProtoUnmarshal | 2529 | 1750 | -30.80% |
| BenchmarkNidRepNativeProtoUnmarshal | 49067 | 35299 | -28.06% |
| BenchmarkNinRepNativeProtoUnmarshal | 47990 | 35456 | -26.12% |
| BenchmarkNidRepPackedNativeProtoUnmarshal | 26456 | 23950 | -9.47% |
| BenchmarkNinRepPackedNativeProtoUnmarshal | 26499 | 24037 | -9.29% |
| BenchmarkNidOptStructProtoUnmarshal | 6803 | 3873 | -43.07% |
| BenchmarkNinOptStructProtoUnmarshal | 6786 | 4154 | -38.79% |
| BenchmarkNidRepStructProtoUnmarshal | 56276 | 31970 | -43.19% |
| BenchmarkNinRepStructProtoUnmarshal | 48750 | 31832 | -34.70% |
| BenchmarkNidEmbeddedStructProtoUnmarshal | 4556 | 1973 | -56.69% |
| BenchmarkNinEmbeddedStructProtoUnmarshal | 4485 | 1975 | -55.96% |
| BenchmarkNidNestedStructProtoUnmarshal | 223395 | 135844 | -39.19% |
| BenchmarkNinNestedStructProtoUnmarshal | 226446 | 134022 | -40.82% |
| BenchmarkNidOptCustomProtoUnmarshal | 1859 | 300 | -83.86% |
| BenchmarkNinOptCustomProtoUnmarshal | 1486 | 402 | -72.95% |
| BenchmarkNidRepCustomProtoUnmarshal | 8229 | 1669 | -79.72% |
| BenchmarkNinRepCustomProtoUnmarshal | 8253 | 1649 | -80.02% |
| BenchmarkNinOptNativeUnionProtoUnmarshal | 840 | 307 | -63.45% |
| BenchmarkNinOptStructUnionProtoUnmarshal | 1395 | 639 | -54.19% |
| BenchmarkNinEmbeddedStructUnionProtoUnmarshal | 2297 | 1167 | -49.19% |
| BenchmarkNinNestedStructUnionProtoUnmarshal | 1820 | 889 | -51.15% |
| BenchmarkTreeProtoUnmarshal | 1521 | 720 | -52.66% |
| BenchmarkOrBranchProtoUnmarshal | 2669 | 1385 | -48.11% |
| BenchmarkAndBranchProtoUnmarshal | 2667 | 1420 | -46.76% |
| BenchmarkLeafProtoUnmarshal | 1171 | 584 | -50.13% |
| BenchmarkDeepTreeProtoUnmarshal | 2065 | 1081 | -47.65% |
| BenchmarkADeepBranchProtoUnmarshal | 2695 | 1178 | -56.29% |
| BenchmarkAndDeepBranchProtoUnmarshal | 4055 | 1918 | -52.70% |
| BenchmarkDeepLeafProtoUnmarshal | 1758 | 865 | -50.80% |
| BenchmarkNilProtoUnmarshal | 564 | 63 | -88.79% |
| BenchmarkNidOptEnumProtoUnmarshal | 762 | 73 | -90.34% |
| BenchmarkNinOptEnumProtoUnmarshal | 764 | 163 | -78.66% |
| BenchmarkNidRepEnumProtoUnmarshal | 1078 | 447 | -58.53% |
| BenchmarkNinRepEnumProtoUnmarshal | 1071 | 479 | -55.28% |
| BenchmarkTimerProtoUnmarshal | 1128 | 362 | -67.91% |
| BenchmarkMyExtendableProtoUnmarshal | 808 | 217 | -73.14% |
| BenchmarkOtherExtenableProtoUnmarshal | 1233 | 517 | -58.07% |

| benchmark | old MB/s | new MB/s | speedup |
| --- | --- | --- | --- |
| BenchmarkNidOptNativeProtoUnmarshal | 133.67 | 334.98 | 2.51x |
| BenchmarkNinOptNativeProtoUnmarshal | 119.77 | 173.08 | 1.45x |
| BenchmarkNidRepNativeProtoUnmarshal | 143.23 | 199.12 | 1.39x |
| BenchmarkNinRepNativeProtoUnmarshal | 146.07 | 198.16 | 1.36x |
| BenchmarkNidRepPackedNativeProtoUnmarshal | 127.80 | 141.04 | 1.10x |
| BenchmarkNinRepPackedNativeProtoUnmarshal | 127.55 | 140.78 | 1.10x |
| BenchmarkNidOptStructProtoUnmarshal | 185.79 | 326.31 | 1.76x |
| BenchmarkNinOptStructProtoUnmarshal | 167.68 | 273.66 | 1.63x |
| BenchmarkNidRepStructProtoUnmarshal | 147.88 | 260.39 | 1.76x |
| BenchmarkNinRepStructProtoUnmarshal | 171.20 | 261.97 | 1.53x |
| BenchmarkNidEmbeddedStructProtoUnmarshal | 142.86 | 329.42 | 2.31x |
| BenchmarkNinEmbeddedStructProtoUnmarshal | 137.33 | 311.83 | 2.27x |
| BenchmarkNidNestedStructProtoUnmarshal | 154.97 | 259.47 | 1.67x |
| BenchmarkNinNestedStructProtoUnmarshal | 154.32 | 258.42 | 1.67x |
| BenchmarkNidOptCustomProtoUnmarshal | 19.36 | 119.66 | 6.18x |
| BenchmarkNinOptCustomProtoUnmarshal | 21.52 | 79.50 | 3.69x |
| BenchmarkNidRepCustomProtoUnmarshal | 17.62 | 86.86 | 4.93x |
| BenchmarkNinRepCustomProtoUnmarshal | 17.57 | 87.92 | 5.00x |
| BenchmarkNinOptNativeUnionProtoUnmarshal | 38.07 | 104.12 | 2.73x |
| BenchmarkNinOptStructUnionProtoUnmarshal | 73.08 | 159.54 | 2.18x |
| BenchmarkNinEmbeddedStructUnionProtoUnmarshal | 94.00 | 185.92 | 1.98x |
| BenchmarkNinNestedStructUnionProtoUnmarshal | 65.35 | 133.75 | 2.05x |
| BenchmarkTreeProtoUnmarshal | 141.28 | 297.13 | 2.10x |
| BenchmarkOrBranchProtoUnmarshal | 162.56 | 313.96 | 1.93x |
| BenchmarkAndBranchProtoUnmarshal | 163.06 | 306.15 | 1.88x |
| BenchmarkLeafProtoUnmarshal | 176.72 | 354.19 | 2.00x |
| BenchmarkDeepTreeProtoUnmarshal | 107.50 | 205.30 | 1.91x |
| BenchmarkADeepBranchProtoUnmarshal | 83.48 | 190.88 | 2.29x |
| BenchmarkAndDeepBranchProtoUnmarshal | 110.97 | 234.60 | 2.11x |
| BenchmarkDeepLeafProtoUnmarshal | 123.40 | 250.73 | 2.03x |
| BenchmarkNidOptEnumProtoUnmarshal | 2.62 | 27.16 | 10.37x |
| BenchmarkNinOptEnumProtoUnmarshal | 1.31 | 6.11 | 4.66x |
| BenchmarkNidRepEnumProtoUnmarshal | 7.42 | 17.88 | 2.41x |
| BenchmarkNinRepEnumProtoUnmarshal | 7.47 | 16.69 | 2.23x |
| BenchmarkTimerProtoUnmarshal | 61.12 | 190.34 | 3.11x |
| BenchmarkMyExtendableProtoUnmarshal | 9.90 | 36.71 | 3.71x |
| BenchmarkOtherExtenableProtoUnmarshal | 21.90 | 52.13 | 2.38x |
\ No newline at end of file diff --git a/vendor/github.com/gogo/protobuf/custom_types.md b/vendor/github.com/gogo/protobuf/custom_types.md deleted file mode 100644 index 3eed249b..00000000 --- a/vendor/github.com/gogo/protobuf/custom_types.md +++ /dev/null @@ -1,68 +0,0 @@ -# Custom types - -Custom types is a gogo protobuf extensions that allows for using a custom -struct type to decorate the underlying structure of the protocol message. - -# How to use - -## Defining the protobuf message - -```proto -message CustomType { - optional ProtoType Field = 1 [(gogoproto.customtype) = "T"]; -} - -message ProtoType { - optional string Field = 1; -} -``` - -or alternatively you can declare the field type in the protocol message to be -`bytes`: - -```proto -message BytesCustomType { - optional bytes Field = 1 [(gogoproto.customtype) = "T"]; -} -``` - -The downside of using `bytes` is that it makes it harder to generate protobuf -code in other languages. In either case, it is the user responsibility to -ensure that the custom type marshals and unmarshals to the expected wire -format. That is, in the first example, gogo protobuf will not attempt to ensure -that the wire format of `ProtoType` and `T` are wire compatible. - -## Custom type method signatures - -The custom type must define the following methods with the given -signatures. Assuming the custom type is called `T`: - -```go -func (t T) Marshal() ([]byte, error) {} -func (t *T) MarshalTo(data []byte) (n int, err error) {} -func (t *T) Unmarshal(data []byte) error {} - -func (t T) MarshalJSON() ([]byte, error) {} -func (t *T) UnmarshalJSON(data []byte) error {} - -// only required if the compare option is set -func (t T) Compare(other T) int {} -// only required if the equal option is set -func (t T) Equal(other T) bool {} -// only required if populate option is set -func NewPopulatedT(r randyThetest) *T {} -``` - -Check [t.go](test/t.go) for a full example - -# Warnings and issues - -`Warning about customtype: It is your responsibility to test all cases of your marshaling, unmarshaling and size methods implemented for your custom type.` - -Issues with customtype include: - * A Bytes method is not allowed. - * Defining a customtype as a fake proto message is broken. - * proto.Clone is broken. - * Using a proto message as a customtype is not allowed. - * cusomtype of type map can not UnmarshalText - * customtype of type struct cannot jsonpb unmarshal diff --git a/vendor/github.com/gogo/protobuf/extensions.md b/vendor/github.com/gogo/protobuf/extensions.md deleted file mode 100644 index 35dfee16..00000000 --- a/vendor/github.com/gogo/protobuf/extensions.md +++ /dev/null @@ -1,162 +0,0 @@ -# gogoprotobuf Extensions - -Here is an [example.proto](https://github.com/gogo/protobuf/blob/master/test/example/example.proto) which uses most of the gogoprotobuf code generation plugins. - -Please also look at the example [Makefile](https://github.com/gogo/protobuf/blob/master/test/example/Makefile) which shows how to specify the `descriptor.proto` and `gogo.proto` in your proto_path - -The documentation at [http://godoc.org/github.com/gogo/protobuf/gogoproto](http://godoc.org/github.com/gogo/protobuf/gogoproto) describes the extensions made to goprotobuf in more detail. - -Also see [http://godoc.org/github.com/gogo/protobuf/plugin/](http://godoc.org/github.com/gogo/protobuf/plugin/) for documentation of each of the extensions which have their own plugins. 
- -# Fast Marshalling and Unmarshalling - -Generating a `Marshal`, `MarshalTo`, `Size` (or `ProtoSize`) and `Unmarshal` method for a struct results in faster marshalling and unmarshalling than when using reflect. - -See [BenchComparison](https://github.com/gogo/protobuf/blob/master/bench.md) for a comparison between reflect and generated code used for marshalling and unmarshalling. - - - - - - - - - - - -
| Name | Option | Type | Description | Default |
| --- | --- | --- | --- | --- |
| marshaler | Message | bool | if true, a Marshal and MarshalTo method is generated for the specific message | false |
| sizer | Message | bool | if true, a Size method is generated for the specific message | false |
| unmarshaler | Message | bool | if true, an Unmarshal method is generated for the specific message | false |
| protosizer | Message | bool | if true, a ProtoSize method is generated for the specific message | false |
| unsafe_marshaler (deprecated) | Message | bool | if true, a Marshal and MarshalTo method is generated. | false |
| unsafe_unmarshaler (deprecated) | Message | bool | if true, an Unmarshal method is generated. | false |
| stable_marshaler | Message | bool | if true, a Marshal and MarshalTo method is generated for the specific message, but unlike marshaler the output is guaranteed to be deterministic, at the sacrifice of some speed | false |
| typedecl (beta) | Message | bool | if false, type declaration of the message is excluded from the generated output. Requires the marshaler and unmarshaler to be generated. | true |
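For orientation, here is a minimal sketch of how the generated methods are typically consumed; the `fastMessage` interface and `roundTrip` helper are illustrative names for this sketch only, assuming just the `Size`, `MarshalTo` and `Unmarshal` signatures produced by the sizer, marshaler and unmarshaler plugins:

```go
package main

import "fmt"

// fastMessage lists the methods emitted by the sizer, marshaler and
// unmarshaler plugins; the interface itself exists only for this sketch.
type fastMessage interface {
	Size() int
	MarshalTo(data []byte) (int, error)
	Unmarshal(data []byte) error
}

// roundTrip encodes src into a pre-sized buffer and decodes it into dst
// without going through reflection-based marshalling.
func roundTrip(src, dst fastMessage) error {
	buf := make([]byte, src.Size())
	n, err := src.MarshalTo(buf)
	if err != nil {
		return err
	}
	return dst.Unmarshal(buf[:n])
}

func main() {
	// Pass pointers to generated message types here, for example:
	//   roundTrip(&pb.MyMessage{...}, &pb.MyMessage{})
	fmt.Println("roundTrip expects messages generated with the marshaler, sizer and unmarshaler options")
}
```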
- -# More Canonical Go Structures - -Lots of times working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. - -You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a new struct. - -`gogoprotobuf` tries to fix these problems with the nullable, embed, customtype, customname, casttype, castkey and castvalue field extensions. - - - - - - - - - - - - - - - -
| Name | Option | Type | Description | Default |
| --- | --- | --- | --- | --- |
| nullable | Field | bool | if false, a field is generated without a pointer (see warning below). | true |
| embed | Field | bool | if true, the field is generated as an embedded field. | false |
| customtype | Field | string | It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128. For more information please refer to the CustomTypes document | goprotobuf type |
| customname (beta) | Field | string | Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. | goprotobuf field name |
| casttype (beta) | Field | string | Changes the generated field type. It assumes that this type is castable to the original goprotobuf field type. It currently does not support maps, structs or enums. | goprotobuf field type |
| castkey (beta) | Field | string | Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. | goprotobuf field type |
| castvalue (beta) | Field | string | Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. | goprotobuf field type |
| enum_customname (beta) | Enum | string | Sets the type name of an enum. If goproto_enum_prefix is enabled, this value will be used as a prefix when generating enum values. Helps with golint issues. | goprotobuf enum type name |
| enumdecl (beta) | Enum | bool | if false, type declaration of the enum is excluded from the generated output. Requires the marshaler and unmarshaler to be generated. | true |
| enumvalue_customname (beta) | Enum Value | string | Changes the generated enum name. Helps with golint issues. | goprotobuf enum value name |
| stdtime | Timestamp Field | bool | Changes the Well Known Timestamp Type to time.Time | Timestamp |
| stdduration | Duration Field | bool | Changes the Well Known Duration Type to time.Duration | Duration |
- -`Warning about nullable: according to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set.` - -# Goprotobuf Compatibility - -Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers (see the section on tests below). - -Gogoprotobuf generates the same code as goprotobuf if no extensions are used. - -The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf. - - - - - - - - - - - -
| Name | Option | Type | Description | Default |
| --- | --- | --- | --- | --- |
| gogoproto_import | File | bool | if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto. | true |
| goproto_enum_prefix | Enum | bool | if false, generates the enum constant names without the messagetype prefix | true |
| goproto_getters | Message | bool | if false, the message is generated without get methods, this is useful when you would rather want to use face | true |
| goproto_stringer | Message | bool | if false, the message is generated without the default string method, this is useful for rather using stringer | true |
| goproto_enum_stringer (experimental) | Enum | bool | if false, the enum is generated without the default string method, this is useful for rather using enum_stringer | true |
| goproto_extensions_map (beta) | Message | bool | if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension | true |
| goproto_unrecognized (beta) | Message | bool | if false, XXX_unrecognized field is not generated. This is useful to reduce GC pressure at the cost of losing information about unrecognized fields. | true |
| goproto_registration (beta) | File | bool | if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). | false |
- -# Less Typing - -The Protocol Buffer language is very parseable and extra code can be easily generated for structures. - -Helper methods, functions and interfaces can be generated by triggering certain extensions like gostring. - - - - - - - - - - - - - -
| Name | Option | Type | Description | Default |
| --- | --- | --- | --- | --- |
| gostring | Message | bool | if true, a `GoString` method is generated. This returns a string representing valid go code to reproduce the current state of the struct. | false |
| onlyone | Message | bool | if true, all fields must be nullable and only one of the fields may be set, like a union. Two methods are generated: `GetValue() interface{}` and `SetValue(v interface{}) (set bool)`. These provide easier interaction with a union. | false |
| equal | Message | bool | if true, an Equal method is generated | false |
| compare | Message | bool | if true, a Compare method is generated. This is very useful for quickly implementing sort on a list of protobuf structs | false |
| verbose_equal | Message | bool | if true, a verbose equal method is generated for the message. This returns an error which describes the exact element which is not equal to the exact element in the other struct. | false |
| stringer | Message | bool | if true, a String method is generated for the message. | false |
| face | Message | bool | if true, a function will be generated which can convert a structure which satisfies an interface (face) to the specified structure. This interface contains getters for each of the fields in the struct. The specified struct is also generated with the getters. This allows it to satisfy its own face. | false |
| description (beta) | Message | bool | if true, a Description method is generated for the message. | false |
| populate | Message | bool | if true, a `NewPopulated` function is generated. This is necessary for generated tests. | false |
| enum_stringer (experimental) | Enum | bool | if true, a String method is generated for an Enum | false |
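As an illustration of the sort use case mentioned for `compare`, here is a minimal hand-written sketch; `item` is a stand-in for a message generated with the compare plugin, which emits a `Compare` method of this shape on the concrete type:

```go
package main

import (
	"fmt"
	"sort"
)

// item stands in for a generated message; the compare plugin generates a
// Compare method with this signature on the concrete message type.
type item struct{ Id int32 }

func (m *item) Compare(other *item) int {
	switch {
	case m.Id < other.Id:
		return -1
	case m.Id > other.Id:
		return 1
	}
	return 0
}

func main() {
	msgs := []*item{{Id: 3}, {Id: 1}, {Id: 2}}
	// With Compare generated, sorting a slice of messages is a one-liner.
	sort.Slice(msgs, func(i, j int) bool { return msgs[i].Compare(msgs[j]) < 0 })
	fmt.Println(msgs[0].Id, msgs[1].Id, msgs[2].Id) // 1 2 3
}
```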
Issues with Compare include:
 * Oneof is not supported yet
 * Not all Well Known Types are supported yet
 * Maps are not supported

# Peace of Mind

Test and Benchmark generation is done with the following extensions:
| Name | Option | Type | Description | Default |
| --- | --- | --- | --- | --- |
| testgen | Message | bool | if true, tests are generated for proto, json and prototext marshalling as well as for some of the other enabled plugins | false |
| benchgen | Message | bool | if true, benchmarks are generated for proto, json and prototext marshalling as well as for some of the other enabled plugins | false |
- -# More Serialization Formats - -Other serialization formats like xml and json typically use reflect to marshal and unmarshal structured data. Manipulating these structs into something other than the default Go requires editing tags. The following extensions provide ways of editing these tags for the generated protobuf structs. - - - - -
| Name | Option | Type | Description | Default |
| --- | --- | --- | --- | --- |
| jsontag (beta) | Field | string | if set, the json tag value between the double quotes is replaced with this string | fieldname |
| moretags (beta) | Field | string | if set, this string is appended to the tag string | empty |
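As a rough illustration of why a replaced json tag matters, the structs below are hand-written, not generator output; they only show how the json struct tag drives encoding/json, with `customTag` carrying the kind of tag a hypothetical `jsontag = "some_field,omitempty"` option would substitute:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// defaultTag uses the field name as its json tag; customTag uses a
// replacement tag of the kind jsontag would inject. Both are illustrative.
type defaultTag struct {
	SomeField string `json:"SomeField,omitempty"`
}

type customTag struct {
	SomeField string `json:"some_field,omitempty"`
}

func main() {
	a, _ := json.Marshal(defaultTag{SomeField: "x"})
	b, _ := json.Marshal(customTag{SomeField: "x"})
	fmt.Println(string(a)) // {"SomeField":"x"}
	fmt.Println(string(b)) // {"some_field":"x"}
}
```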
- -Here is a longer explanation of jsontag and moretags - -# File Options - -Each of the boolean message and enum extensions also have a file extension: - - * `marshaler_all` - * `sizer_all` - * `protosizer_all` - * `unmarshaler_all` - * `unsafe_marshaler_all` - * `unsafe_unmarshaler_all` - * `stable_marshaler_all` - * `goproto_enum_prefix_all` - * `goproto_getters_all` - * `goproto_stringer_all` - * `goproto_enum_stringer_all` - * `goproto_extensions_map_all` - * `goproto_unrecognized_all` - * `gostring_all` - * `onlyone_all` - * `equal_all` - * `compare_all` - * `verbose_equal_all` - * `stringer_all` - * `enum_stringer_all` - * `face_all` - * `description_all` - * `populate_all` - * `testgen_all` - * `benchgen_all` - * `enumdecl_all` - * `typedecl_all` - -Each of these are the same as their Message Option counterparts, except they apply to all messages in the file. Their Message option counterparts can also be used to overwrite their effect. - -# Tests - - * The normal barrage of tests are run with: `make tests` - * A few weird tests: `make testall` - * Tests for compatibility with [golang/protobuf](https://github.com/golang/protobuf) are handled by a different project [harmonytests](https://github.com/gogo/harmonytests), since it requires goprotobuf. - * Cross version tests are made with [Travis CI](https://travis-ci.org/gogo/protobuf). - * GRPC Tests are also handled by a different project [grpctests](https://github.com/gogo/grpctests), since it depends on a lot of grpc libraries. - * Thanks to [go-fuzz](https://github.com/dvyukov/go-fuzz/) we have proper [fuzztests](https://github.com/gogo/fuzztests). - diff --git a/vendor/github.com/gogo/protobuf/install-protobuf.sh b/vendor/github.com/gogo/protobuf/install-protobuf.sh deleted file mode 100755 index f42fc9e6..00000000 --- a/vendor/github.com/gogo/protobuf/install-protobuf.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -die() { - echo "$@" >&2 - exit 1 -} - -cd /home/travis - -case "$PROTOBUF_VERSION" in -2*) - basename=protobuf-$PROTOBUF_VERSION - wget https://github.com/google/protobuf/releases/download/v$PROTOBUF_VERSION/$basename.tar.gz - tar xzf $basename.tar.gz - cd protobuf-$PROTOBUF_VERSION - ./configure --prefix=/home/travis && make -j2 && make install - ;; -3*) - basename=protoc-$PROTOBUF_VERSION-linux-x86_64 - wget https://github.com/google/protobuf/releases/download/v$PROTOBUF_VERSION/$basename.zip - unzip $basename.zip - ;; -*) - die "unknown protobuf version: $PROTOBUF_VERSION" - ;; -esac diff --git a/vendor/github.com/gogo/protobuf/proto/BUILD.bazel b/vendor/github.com/gogo/protobuf/proto/BUILD.bazel deleted file mode 100644 index 00709c2b..00000000 --- a/vendor/github.com/gogo/protobuf/proto/BUILD.bazel +++ /dev/null @@ -1,68 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "clone.go", - "decode.go", - "decode_gogo.go", - "discard.go", - "duration.go", - "duration_gogo.go", - "encode.go", - "encode_gogo.go", - "equal.go", - "extensions.go", - "extensions_gogo.go", - "lib.go", - "lib_gogo.go", - "message_set.go", - "pointer_unsafe.go", - "pointer_unsafe_gogo.go", - "properties.go", - "properties_gogo.go", - "skip_gogo.go", - "text.go", - "text_gogo.go", - "text_parser.go", - "timestamp.go", - "timestamp_gogo.go", - ], - importpath = "github.com/gogo/protobuf/proto", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = [ - "message_set_test.go", - "size2_test.go", - ], - embed = 
[":go_default_library"], - importpath = "github.com/gogo/protobuf/proto", -) - -go_test( - name = "go_default_xtest", - srcs = [ - "all_test.go", - "any_test.go", - "clone_test.go", - "decode_test.go", - "encode_test.go", - "equal_test.go", - "extensions_test.go", - "map_test.go", - "proto3_test.go", - "size_test.go", - "text_parser_test.go", - "text_test.go", - ], - importpath = "github.com/gogo/protobuf/proto_test", - deps = [ - ":go_default_library", - "//vendor/github.com/gogo/protobuf/proto/proto3_proto:go_default_library", - "//vendor/github.com/gogo/protobuf/proto/testdata:go_default_library", - "//vendor/github.com/gogo/protobuf/types:go_default_library", - ], -) diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile deleted file mode 100644 index 41c71757..00000000 --- a/vendor/github.com/gogo/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C testdata - protoc-min-version --version="3.0.0" --proto_path=.:../../../../:../protobuf --gogo_out=Mtestdata/test.proto=github.com/gogo/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types:. proto3_proto/proto3.proto - make diff --git a/vendor/github.com/gogo/protobuf/proto/all_test.go b/vendor/github.com/gogo/protobuf/proto/all_test.go deleted file mode 100644 index b5f8709d..00000000 --- a/vendor/github.com/gogo/protobuf/proto/all_test.go +++ /dev/null @@ -1,2278 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "math" - "math/rand" - "reflect" - "runtime/debug" - "strings" - "testing" - "time" - - . "github.com/gogo/protobuf/proto" - . "github.com/gogo/protobuf/proto/testdata" -) - -var globalO *Buffer - -func old() *Buffer { - if globalO == nil { - globalO = NewBuffer(nil) - } - globalO.Reset() - return globalO -} - -func equalbytes(b1, b2 []byte, t *testing.T) { - if len(b1) != len(b2) { - t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) - return - } - for i := 0; i < len(b1); i++ { - if b1[i] != b2[i] { - t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) - } - } -} - -func initGoTestField() *GoTestField { - f := new(GoTestField) - f.Label = String("label") - f.Type = String("type") - return f -} - -// These are all structurally equivalent but the tag numbers differ. -// (It's remarkable that required, optional, and repeated all have -// 8 letters.) 
-func initGoTest_RequiredGroup() *GoTest_RequiredGroup { - return &GoTest_RequiredGroup{ - RequiredField: String("required"), - } -} - -func initGoTest_OptionalGroup() *GoTest_OptionalGroup { - return &GoTest_OptionalGroup{ - RequiredField: String("optional"), - } -} - -func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { - return &GoTest_RepeatedGroup{ - RequiredField: String("repeated"), - } -} - -func initGoTest(setdefaults bool) *GoTest { - pb := new(GoTest) - if setdefaults { - pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) - pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) - pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) - pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) - pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) - pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) - pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) - pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) - pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) - pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) - pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted - pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) - pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) - } - - pb.Kind = GoTest_TIME.Enum() - pb.RequiredField = initGoTestField() - pb.F_BoolRequired = Bool(true) - pb.F_Int32Required = Int32(3) - pb.F_Int64Required = Int64(6) - pb.F_Fixed32Required = Uint32(32) - pb.F_Fixed64Required = Uint64(64) - pb.F_Uint32Required = Uint32(3232) - pb.F_Uint64Required = Uint64(6464) - pb.F_FloatRequired = Float32(3232) - pb.F_DoubleRequired = Float64(6464) - pb.F_StringRequired = String("string") - pb.F_BytesRequired = []byte("bytes") - pb.F_Sint32Required = Int32(-32) - pb.F_Sint64Required = Int64(-64) - pb.Requiredgroup = initGoTest_RequiredGroup() - - return pb -} - -func fail(msg string, b *bytes.Buffer, s string, t *testing.T) { - data := b.Bytes() - ld := len(data) - ls := len(s) / 2 - - fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) - - // find the interesting spot - n - n := ls - if ld < ls { - n = ld - } - j := 0 - for i := 0; i < n; i++ { - bs := hex(s[j])*16 + hex(s[j+1]) - j += 2 - if data[i] == bs { - continue - } - n = i - break - } - l := n - 10 - if l < 0 { - l = 0 - } - h := n + 10 - - // find the interesting spot - n - fmt.Printf("is[%d]:", l) - for i := l; i < h; i++ { - if i >= ld { - fmt.Printf(" --") - continue - } - fmt.Printf(" %.2x", data[i]) - } - fmt.Printf("\n") - - fmt.Printf("sb[%d]:", l) - for i := l; i < h; i++ { - if i >= ls { - fmt.Printf(" --") - continue - } - bs := hex(s[j])*16 + hex(s[j+1]) - j += 2 - fmt.Printf(" %.2x", bs) - } - fmt.Printf("\n") - - t.Fail() - - // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) - // Print the output in a partially-decoded format; can - // be helpful when updating the test. It produces the output - // that is pasted, with minor edits, into the argument to verify(). 
- // data := b.Bytes() - // nesting := 0 - // for b.Len() > 0 { - // start := len(data) - b.Len() - // var u uint64 - // u, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on varint:", err) - // return - // } - // wire := u & 0x7 - // tag := u >> 3 - // switch wire { - // case WireVarint: - // v, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on varint:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireFixed32: - // v, err := DecodeFixed32(b) - // if err != nil { - // fmt.Printf("decode error on fixed32:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireFixed64: - // v, err := DecodeFixed64(b) - // if err != nil { - // fmt.Printf("decode error on fixed64:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireBytes: - // nb, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on bytes:", err) - // return - // } - // after_tag := len(data) - b.Len() - // str := make([]byte, nb) - // _, err = b.Read(str) - // if err != nil { - // fmt.Printf("decode error on bytes:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", - // data[start:after_tag], str, tag, wire) - // case WireStartGroup: - // nesting++ - // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", - // data[start:len(data)-b.Len()], tag, nesting) - // case WireEndGroup: - // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", - // data[start:len(data)-b.Len()], tag, nesting) - // nesting-- - // default: - // fmt.Printf("unrecognized wire type %d\n", wire) - // return - // } - // } -} - -func hex(c uint8) uint8 { - if '0' <= c && c <= '9' { - return c - '0' - } - if 'a' <= c && c <= 'f' { - return 10 + c - 'a' - } - if 'A' <= c && c <= 'F' { - return 10 + c - 'A' - } - return 0 -} - -func equal(b []byte, s string, t *testing.T) bool { - if 2*len(b) != len(s) { - // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) - fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s)) - return false - } - for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { - x := hex(s[j])*16 + hex(s[j+1]) - if b[i] != x { - // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) - fmt.Printf("bad byte[%d]:%x %x", i, b[i], x) - return false - } - } - return true -} - -func overify(t *testing.T, pb *GoTest, expected string) { - o := old() - err := o.Marshal(pb) - if err != nil { - fmt.Printf("overify marshal-1 err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("expected = %s", expected) - } - if !equal(o.Bytes(), expected, t) { - o.DebugPrint("overify neq 1", o.Bytes()) - t.Fatalf("expected = %s", expected) - } - - // Now test Unmarshal by recreating the original buffer. 
- pbd := new(GoTest) - err = o.Unmarshal(pbd) - if err != nil { - t.Fatalf("overify unmarshal err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("string = %s", expected) - } - o.Reset() - err = o.Marshal(pbd) - if err != nil { - t.Errorf("overify marshal-2 err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("string = %s", expected) - } - if !equal(o.Bytes(), expected, t) { - o.DebugPrint("overify neq 2", o.Bytes()) - t.Fatalf("string = %s", expected) - } -} - -// Simple tests for numeric encode/decode primitives (varint, etc.) -func TestNumericPrimitives(t *testing.T) { - for i := uint64(0); i < 1e6; i += 111 { - o := old() - if o.EncodeVarint(i) != nil { - t.Error("EncodeVarint") - break - } - x, e := o.DecodeVarint() - if e != nil { - t.Fatal("DecodeVarint") - } - if x != i { - t.Fatal("varint decode fail:", i, x) - } - - o = old() - if o.EncodeFixed32(i) != nil { - t.Fatal("encFixed32") - } - x, e = o.DecodeFixed32() - if e != nil { - t.Fatal("decFixed32") - } - if x != i { - t.Fatal("fixed32 decode fail:", i, x) - } - - o = old() - if o.EncodeFixed64(i*1234567) != nil { - t.Error("encFixed64") - break - } - x, e = o.DecodeFixed64() - if e != nil { - t.Error("decFixed64") - break - } - if x != i*1234567 { - t.Error("fixed64 decode fail:", i*1234567, x) - break - } - - o = old() - i32 := int32(i - 12345) - if o.EncodeZigzag32(uint64(i32)) != nil { - t.Fatal("EncodeZigzag32") - } - x, e = o.DecodeZigzag32() - if e != nil { - t.Fatal("DecodeZigzag32") - } - if x != uint64(uint32(i32)) { - t.Fatal("zigzag32 decode fail:", i32, x) - } - - o = old() - i64 := int64(i - 12345) - if o.EncodeZigzag64(uint64(i64)) != nil { - t.Fatal("EncodeZigzag64") - } - x, e = o.DecodeZigzag64() - if e != nil { - t.Fatal("DecodeZigzag64") - } - if x != uint64(i64) { - t.Fatal("zigzag64 decode fail:", i64, x) - } - } -} - -// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces. -type fakeMarshaler struct { - b []byte - err error -} - -func (f *fakeMarshaler) Marshal() ([]byte, error) { return f.b, f.err } -func (f *fakeMarshaler) String() string { return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) } -func (f *fakeMarshaler) ProtoMessage() {} -func (f *fakeMarshaler) Reset() {} - -type msgWithFakeMarshaler struct { - M *fakeMarshaler `protobuf:"bytes,1,opt,name=fake"` -} - -func (m *msgWithFakeMarshaler) String() string { return CompactTextString(m) } -func (m *msgWithFakeMarshaler) ProtoMessage() {} -func (m *msgWithFakeMarshaler) Reset() {} - -// Simple tests for proto messages that implement the Marshaler interface. -func TestMarshalerEncoding(t *testing.T) { - tests := []struct { - name string - m Message - want []byte - errType reflect.Type - }{ - { - name: "Marshaler that fails", - m: &fakeMarshaler{ - err: errors.New("some marshal err"), - b: []byte{5, 6, 7}, - }, - // Since the Marshal method returned bytes, they should be written to the - // buffer. (For efficiency, we assume that Marshal implementations are - // always correct w.r.t. RequiredNotSetError and output.) - want: []byte{5, 6, 7}, - errType: reflect.TypeOf(errors.New("some marshal err")), - }, - { - name: "Marshaler that fails with RequiredNotSetError", - m: &msgWithFakeMarshaler{ - M: &fakeMarshaler{ - err: &RequiredNotSetError{}, - b: []byte{5, 6, 7}, - }, - }, - // Since there's an error that can be continued after, - // the buffer should be written. 
- want: []byte{ - 10, 3, // for &msgWithFakeMarshaler - 5, 6, 7, // for &fakeMarshaler - }, - errType: reflect.TypeOf(&RequiredNotSetError{}), - }, - { - name: "Marshaler that succeeds", - m: &fakeMarshaler{ - b: []byte{0, 1, 2, 3, 4, 127, 255}, - }, - want: []byte{0, 1, 2, 3, 4, 127, 255}, - }, - } - for _, test := range tests { - b := NewBuffer(nil) - err := b.Marshal(test.m) - if reflect.TypeOf(err) != test.errType { - t.Errorf("%s: got err %T(%v) wanted %T", test.name, err, err, test.errType) - } - if !reflect.DeepEqual(test.want, b.Bytes()) { - t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) - } - if size := Size(test.m); size != len(b.Bytes()) { - t.Errorf("%s: Size(_) = %v, but marshaled to %v bytes", test.name, size, len(b.Bytes())) - } - - m, mErr := Marshal(test.m) - if !bytes.Equal(b.Bytes(), m) { - t.Errorf("%s: Marshal returned %v, but (*Buffer).Marshal wrote %v", test.name, m, b.Bytes()) - } - if !reflect.DeepEqual(err, mErr) { - t.Errorf("%s: Marshal err = %q, but (*Buffer).Marshal returned %q", - test.name, fmt.Sprint(mErr), fmt.Sprint(err)) - } - } -} - -// Simple tests for bytes -func TestBytesPrimitives(t *testing.T) { - o := old() - bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'} - if o.EncodeRawBytes(bytes) != nil { - t.Error("EncodeRawBytes") - } - decb, e := o.DecodeRawBytes(false) - if e != nil { - t.Error("DecodeRawBytes") - } - equalbytes(bytes, decb, t) -} - -// Simple tests for strings -func TestStringPrimitives(t *testing.T) { - o := old() - s := "now is the time" - if o.EncodeStringBytes(s) != nil { - t.Error("enc_string") - } - decs, e := o.DecodeStringBytes() - if e != nil { - t.Error("dec_string") - } - if s != decs { - t.Error("string encode/decode fail:", s, decs) - } -} - -// Do we catch the "required bit not set" case? -func TestRequiredBit(t *testing.T) { - o := old() - pb := new(GoTest) - err := o.Marshal(pb) - if err == nil { - t.Error("did not catch missing required fields") - } else if strings.Index(err.Error(), "Kind") < 0 { - t.Error("wrong error type:", err) - } -} - -// Check that all fields are nil. -// Clearly silly, and a residue from a more interesting test with an earlier, -// different initialization property, but it once caught a compiler bug so -// it lives. 
-func checkInitialized(pb *GoTest, t *testing.T) { - if pb.F_BoolDefaulted != nil { - t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted) - } - if pb.F_Int32Defaulted != nil { - t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted) - } - if pb.F_Int64Defaulted != nil { - t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted) - } - if pb.F_Fixed32Defaulted != nil { - t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted) - } - if pb.F_Fixed64Defaulted != nil { - t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted) - } - if pb.F_Uint32Defaulted != nil { - t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted) - } - if pb.F_Uint64Defaulted != nil { - t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted) - } - if pb.F_FloatDefaulted != nil { - t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted) - } - if pb.F_DoubleDefaulted != nil { - t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted) - } - if pb.F_StringDefaulted != nil { - t.Error("New or Reset did not set string:", *pb.F_StringDefaulted) - } - if pb.F_BytesDefaulted != nil { - t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted)) - } - if pb.F_Sint32Defaulted != nil { - t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted) - } - if pb.F_Sint64Defaulted != nil { - t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted) - } -} - -// Does Reset() reset? -func TestReset(t *testing.T) { - pb := initGoTest(true) - // muck with some values - pb.F_BoolDefaulted = Bool(false) - pb.F_Int32Defaulted = Int32(237) - pb.F_Int64Defaulted = Int64(12346) - pb.F_Fixed32Defaulted = Uint32(32000) - pb.F_Fixed64Defaulted = Uint64(666) - pb.F_Uint32Defaulted = Uint32(323232) - pb.F_Uint64Defaulted = nil - pb.F_FloatDefaulted = nil - pb.F_DoubleDefaulted = Float64(0) - pb.F_StringDefaulted = String("gotcha") - pb.F_BytesDefaulted = []byte("asdfasdf") - pb.F_Sint32Defaulted = Int32(123) - pb.F_Sint64Defaulted = Int64(789) - pb.Reset() - checkInitialized(pb, t) -} - -// All required fields set, no defaults provided. -func TestEncodeDecode1(t *testing.T) { - pb := initGoTest(false) - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 0x20 - "714000000000000000"+ // field 14, encoding 1, value 0x40 - "78a019"+ // field 15, encoding 0, value 0xca0 = 3232 - "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string" - "b304"+ // field 70, encoding 3, start group - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // field 70, encoding 4, end group - "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f") // field 103, encoding 0, 0x7f zigzag64 -} - -// All required fields set, defaults provided. 
-func TestEncodeDecode2(t *testing.T) { - pb := initGoTest(true) - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All default fields set to their default value by hand -func TestEncodeDecode3(t *testing.T) { - pb := initGoTest(false) - pb.F_BoolDefaulted = Bool(true) - pb.F_Int32Defaulted = Int32(32) - pb.F_Int64Defaulted = Int64(64) - pb.F_Fixed32Defaulted = Uint32(320) - pb.F_Fixed64Defaulted = Uint64(640) - pb.F_Uint32Defaulted = Uint32(3200) - pb.F_Uint64Defaulted = Uint64(6400) - pb.F_FloatDefaulted = Float32(314159) - pb.F_DoubleDefaulted = Float64(271828) - pb.F_StringDefaulted = String("hello, \"world!\"\n") - pb.F_BytesDefaulted = []byte("Bignose") - pb.F_Sint32Defaulted = Int32(-32) - pb.F_Sint64Defaulted = Int64(-64) - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 
271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, defaults provided, all non-defaulted optional fields have values. -func TestEncodeDecode4(t *testing.T) { - pb := initGoTest(true) - pb.Table = String("hello") - pb.Param = Int32(7) - pb.OptionalField = initGoTestField() - pb.F_BoolOptional = Bool(true) - pb.F_Int32Optional = Int32(32) - pb.F_Int64Optional = Int64(64) - pb.F_Fixed32Optional = Uint32(3232) - pb.F_Fixed64Optional = Uint64(6464) - pb.F_Uint32Optional = Uint32(323232) - pb.F_Uint64Optional = Uint64(646464) - pb.F_FloatOptional = Float32(32.) - pb.F_DoubleOptional = Float64(64.) - pb.F_StringOptional = String("hello") - pb.F_BytesOptional = []byte("Bignose") - pb.F_Sint32Optional = Int32(-32) - pb.F_Sint64Optional = Int64(-64) - pb.Optionalgroup = initGoTest_OptionalGroup() - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello" - "1807"+ // field 3, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "f00101"+ // field 30, encoding 0, value 1 - "f80120"+ // field 31, encoding 0, value 32 - "800240"+ // field 32, encoding 0, value 64 - "8d02a00c0000"+ // field 33, encoding 5, value 3232 - "91024019000000000000"+ // field 34, encoding 1, value 6464 - "9802a0dd13"+ // field 35, encoding 0, value 323232 - "a002c0ba27"+ // field 36, encoding 0, value 646464 - "ad0200000042"+ // field 37, encoding 5, value 32.0 - "b1020000000000005040"+ // field 38, encoding 1, value 64.0 - "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "d305"+ // start group field 90 level 1 - 
"da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional" - "d405"+ // end group field 90 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" - "f0123f"+ // field 302, encoding 0, value 63 - "f8127f"+ // field 303, encoding 0, value 127 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, defaults provided, all repeated fields given two values. -func TestEncodeDecode5(t *testing.T) { - pb := initGoTest(true) - pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()} - pb.F_BoolRepeated = []bool{false, true} - pb.F_Int32Repeated = []int32{32, 33} - pb.F_Int64Repeated = []int64{64, 65} - pb.F_Fixed32Repeated = []uint32{3232, 3333} - pb.F_Fixed64Repeated = []uint64{6464, 6565} - pb.F_Uint32Repeated = []uint32{323232, 333333} - pb.F_Uint64Repeated = []uint64{646464, 656565} - pb.F_FloatRepeated = []float32{32., 33.} - pb.F_DoubleRepeated = []float64{64., 65.} - pb.F_StringRepeated = []string{"hello", "sailor"} - pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} - pb.F_Sint32Repeated = []int32{32, -32} - pb.F_Sint64Repeated = []int64{64, -64} - pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) - "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "a00100"+ // field 20, encoding 0, value 0 - "a00101"+ // field 20, encoding 0, value 1 - "a80120"+ // field 21, encoding 0, value 32 - "a80121"+ // field 21, encoding 0, value 33 - "b00140"+ // field 22, encoding 0, value 64 - "b00141"+ // field 22, encoding 0, value 65 - "bd01a00c0000"+ // field 23, encoding 5, value 3232 - "bd01050d0000"+ // field 23, encoding 5, value 3333 - "c1014019000000000000"+ // field 24, encoding 1, value 6464 - "c101a519000000000000"+ // field 24, encoding 1, value 6565 - "c801a0dd13"+ // field 25, encoding 0, value 323232 - "c80195ac14"+ // field 25, encoding 0, value 333333 - "d001c0ba27"+ // field 26, encoding 0, value 646464 - "d001b58928"+ // field 26, encoding 0, value 656565 - "dd0100000042"+ // field 27, encoding 5, value 32.0 - "dd0100000442"+ // field 27, encoding 5, value 33.0 - "e1010000000000005040"+ // field 28, encoding 1, value 64.0 - "e1010000000000405040"+ // field 28, encoding 1, value 65.0 - "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello" - "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 
42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "8305"+ // start group field 80 level 1 - "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" - "8405"+ // end group field 80 level 1 - "8305"+ // start group field 80 level 1 - "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" - "8405"+ // end group field 80 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "ca0c03"+"626967"+ // field 201, encoding 2, string "big" - "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" - "d00c40"+ // field 202, encoding 0, value 32 - "d00c3f"+ // field 202, encoding 0, value -32 - "d80c8001"+ // field 203, encoding 0, value 64 - "d80c7f"+ // field 203, encoding 0, value -64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, all packed repeated fields given two values. -func TestEncodeDecode6(t *testing.T) { - pb := initGoTest(false) - pb.F_BoolRepeatedPacked = []bool{false, true} - pb.F_Int32RepeatedPacked = []int32{32, 33} - pb.F_Int64RepeatedPacked = []int64{64, 65} - pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333} - pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565} - pb.F_Uint32RepeatedPacked = []uint32{323232, 333333} - pb.F_Uint64RepeatedPacked = []uint64{646464, 656565} - pb.F_FloatRepeatedPacked = []float32{32., 33.} - pb.F_DoubleRepeatedPacked = []float64{64., 65.} - pb.F_Sint32RepeatedPacked = []int32{32, -32} - pb.F_Sint64RepeatedPacked = []int64{64, -64} - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1 - "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33 - "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65 - "aa0308"+ // field 53, encoding 2, 8 bytes - "a00c0000050d0000"+ // value 3232, value 3333 - "b20310"+ // field 54, encoding 2, 16 bytes - "4019000000000000a519000000000000"+ // value 6464, value 6565 - "ba0306"+ // field 55, encoding 2, 6 bytes - "a0dd1395ac14"+ // value 323232, value 333333 - "c20306"+ // field 56, encoding 2, 6 bytes - "c0ba27b58928"+ // value 646464, value 656565 - "ca0308"+ // field 57, 
encoding 2, 8 bytes - "0000004200000442"+ // value 32.0, value 33.0 - "d20310"+ // field 58, encoding 2, 16 bytes - "00000000000050400000000000405040"+ // value 64.0, value 65.0 - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "b21f02"+ // field 502, encoding 2, 2 bytes - "403f"+ // value 32, value -32 - "ba1f03"+ // field 503, encoding 2, 3 bytes - "80017f") // value 64, value -64 -} - -// Test that we can encode empty bytes fields. -func TestEncodeDecodeBytes1(t *testing.T) { - pb := initGoTest(false) - - // Create our bytes - pb.F_BytesRequired = []byte{} - pb.F_BytesRepeated = [][]byte{{}} - pb.F_BytesOptional = []byte{} - - d, err := Marshal(pb) - if err != nil { - t.Error(err) - } - - pbd := new(GoTest) - if err := Unmarshal(d, pbd); err != nil { - t.Error(err) - } - - if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 { - t.Error("required empty bytes field is incorrect") - } - if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil { - t.Error("repeated empty bytes field is incorrect") - } - if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 { - t.Error("optional empty bytes field is incorrect") - } -} - -// Test that we encode nil-valued fields of a repeated bytes field correctly. -// Since entries in a repeated field cannot be nil, nil must mean empty value. -func TestEncodeDecodeBytes2(t *testing.T) { - pb := initGoTest(false) - - // Create our bytes - pb.F_BytesRepeated = [][]byte{nil} - - d, err := Marshal(pb) - if err != nil { - t.Error(err) - } - - pbd := new(GoTest) - if err := Unmarshal(d, pbd); err != nil { - t.Error(err) - } - - if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil { - t.Error("Unexpected value for repeated bytes field") - } -} - -// All required fields set, defaults provided, all repeated fields given two values. -func TestSkippingUnrecognizedFields(t *testing.T) { - o := old() - pb := initGoTestField() - - // Marshal it normally. - o.Marshal(pb) - - // Now new a GoSkipTest record. - skip := &GoSkipTest{ - SkipInt32: Int32(32), - SkipFixed32: Uint32(3232), - SkipFixed64: Uint64(6464), - SkipString: String("skipper"), - Skipgroup: &GoSkipTest_SkipGroup{ - GroupInt32: Int32(75), - GroupString: String("wxyz"), - }, - } - - // Marshal it into same buffer. - o.Marshal(skip) - - pbd := new(GoTestField) - o.Unmarshal(pbd) - - // The __unrecognized field should be a marshaling of GoSkipTest - skipd := new(GoSkipTest) - - o.SetBuf(pbd.XXX_unrecognized) - o.Unmarshal(skipd) - - if *skipd.SkipInt32 != *skip.SkipInt32 { - t.Error("skip int32", skipd.SkipInt32) - } - if *skipd.SkipFixed32 != *skip.SkipFixed32 { - t.Error("skip fixed32", skipd.SkipFixed32) - } - if *skipd.SkipFixed64 != *skip.SkipFixed64 { - t.Error("skip fixed64", skipd.SkipFixed64) - } - if *skipd.SkipString != *skip.SkipString { - t.Error("skip string", *skipd.SkipString) - } - if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 { - t.Error("skip group int32", skipd.Skipgroup.GroupInt32) - } - if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString { - t.Error("skip group string", *skipd.Skipgroup.GroupString) - } -} - -// Check that unrecognized fields of a submessage are preserved. 
-func TestSubmessageUnrecognizedFields(t *testing.T) { - nm := &NewMessage{ - Nested: &NewMessage_Nested{ - Name: String("Nigel"), - FoodGroup: String("carbs"), - }, - } - b, err := Marshal(nm) - if err != nil { - t.Fatalf("Marshal of NewMessage: %v", err) - } - - // Unmarshal into an OldMessage. - om := new(OldMessage) - if err = Unmarshal(b, om); err != nil { - t.Fatalf("Unmarshal to OldMessage: %v", err) - } - exp := &OldMessage{ - Nested: &OldMessage_Nested{ - Name: String("Nigel"), - // normal protocol buffer users should not do this - XXX_unrecognized: []byte("\x12\x05carbs"), - }, - } - if !Equal(om, exp) { - t.Errorf("om = %v, want %v", om, exp) - } - - // Clone the OldMessage. - om = Clone(om).(*OldMessage) - if !Equal(om, exp) { - t.Errorf("Clone(om) = %v, want %v", om, exp) - } - - // Marshal the OldMessage, then unmarshal it into an empty NewMessage. - if b, err = Marshal(om); err != nil { - t.Fatalf("Marshal of OldMessage: %v", err) - } - t.Logf("Marshal(%v) -> %q", om, b) - nm2 := new(NewMessage) - if err := Unmarshal(b, nm2); err != nil { - t.Fatalf("Unmarshal to NewMessage: %v", err) - } - if !Equal(nm, nm2) { - t.Errorf("NewMessage round-trip: %v => %v", nm, nm2) - } -} - -// Check that an int32 field can be upgraded to an int64 field. -func TestNegativeInt32(t *testing.T) { - om := &OldMessage{ - Num: Int32(-1), - } - b, err := Marshal(om) - if err != nil { - t.Fatalf("Marshal of OldMessage: %v", err) - } - - // Check the size. It should be 11 bytes; - // 1 for the field/wire type, and 10 for the negative number. - if len(b) != 11 { - t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b) - } - - // Unmarshal into a NewMessage. - nm := new(NewMessage) - if err := Unmarshal(b, nm); err != nil { - t.Fatalf("Unmarshal to NewMessage: %v", err) - } - want := &NewMessage{ - Num: Int64(-1), - } - if !Equal(nm, want) { - t.Errorf("nm = %v, want %v", nm, want) - } -} - -// Check that we can grow an array (repeated field) to have many elements. -// This test doesn't depend only on our encoding; for variety, it makes sure -// we create, encode, and decode the correct contents explicitly. It's therefore -// a bit messier. -// This test also uses (and hence tests) the Marshal/Unmarshal functions -// instead of the methods. -func TestBigRepeated(t *testing.T) { - pb := initGoTest(true) - - // Create the arrays - const N = 50 // Internally the library starts much smaller. - pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N) - pb.F_Sint64Repeated = make([]int64, N) - pb.F_Sint32Repeated = make([]int32, N) - pb.F_BytesRepeated = make([][]byte, N) - pb.F_StringRepeated = make([]string, N) - pb.F_DoubleRepeated = make([]float64, N) - pb.F_FloatRepeated = make([]float32, N) - pb.F_Uint64Repeated = make([]uint64, N) - pb.F_Uint32Repeated = make([]uint32, N) - pb.F_Fixed64Repeated = make([]uint64, N) - pb.F_Fixed32Repeated = make([]uint32, N) - pb.F_Int64Repeated = make([]int64, N) - pb.F_Int32Repeated = make([]int32, N) - pb.F_BoolRepeated = make([]bool, N) - pb.RepeatedField = make([]*GoTestField, N) - - // Fill in the arrays with checkable values. 
- igtf := initGoTestField() - igtrg := initGoTest_RepeatedGroup() - for i := 0; i < N; i++ { - pb.Repeatedgroup[i] = igtrg - pb.F_Sint64Repeated[i] = int64(i) - pb.F_Sint32Repeated[i] = int32(i) - s := fmt.Sprint(i) - pb.F_BytesRepeated[i] = []byte(s) - pb.F_StringRepeated[i] = s - pb.F_DoubleRepeated[i] = float64(i) - pb.F_FloatRepeated[i] = float32(i) - pb.F_Uint64Repeated[i] = uint64(i) - pb.F_Uint32Repeated[i] = uint32(i) - pb.F_Fixed64Repeated[i] = uint64(i) - pb.F_Fixed32Repeated[i] = uint32(i) - pb.F_Int64Repeated[i] = int64(i) - pb.F_Int32Repeated[i] = int32(i) - pb.F_BoolRepeated[i] = i%2 == 0 - pb.RepeatedField[i] = igtf - } - - // Marshal. - buf, _ := Marshal(pb) - - // Now test Unmarshal by recreating the original buffer. - pbd := new(GoTest) - Unmarshal(buf, pbd) - - // Check the checkable values - for i := uint64(0); i < N; i++ { - if pbd.Repeatedgroup[i] == nil { // TODO: more checking? - t.Error("pbd.Repeatedgroup bad") - } - var x uint64 - x = uint64(pbd.F_Sint64Repeated[i]) - if x != i { - t.Error("pbd.F_Sint64Repeated bad", x, i) - } - x = uint64(pbd.F_Sint32Repeated[i]) - if x != i { - t.Error("pbd.F_Sint32Repeated bad", x, i) - } - s := fmt.Sprint(i) - equalbytes(pbd.F_BytesRepeated[i], []byte(s), t) - if pbd.F_StringRepeated[i] != s { - t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i) - } - x = uint64(pbd.F_DoubleRepeated[i]) - if x != i { - t.Error("pbd.F_DoubleRepeated bad", x, i) - } - x = uint64(pbd.F_FloatRepeated[i]) - if x != i { - t.Error("pbd.F_FloatRepeated bad", x, i) - } - x = pbd.F_Uint64Repeated[i] - if x != i { - t.Error("pbd.F_Uint64Repeated bad", x, i) - } - x = uint64(pbd.F_Uint32Repeated[i]) - if x != i { - t.Error("pbd.F_Uint32Repeated bad", x, i) - } - x = pbd.F_Fixed64Repeated[i] - if x != i { - t.Error("pbd.F_Fixed64Repeated bad", x, i) - } - x = uint64(pbd.F_Fixed32Repeated[i]) - if x != i { - t.Error("pbd.F_Fixed32Repeated bad", x, i) - } - x = uint64(pbd.F_Int64Repeated[i]) - if x != i { - t.Error("pbd.F_Int64Repeated bad", x, i) - } - x = uint64(pbd.F_Int32Repeated[i]) - if x != i { - t.Error("pbd.F_Int32Repeated bad", x, i) - } - if pbd.F_BoolRepeated[i] != (i%2 == 0) { - t.Error("pbd.F_BoolRepeated bad", x, i) - } - if pbd.RepeatedField[i] == nil { // TODO: more checking? - t.Error("pbd.RepeatedField bad") - } - } -} - -// Verify we give a useful message when decoding to the wrong structure type. -func TestTypeMismatch(t *testing.T) { - pb1 := initGoTest(true) - - // Marshal - o := old() - o.Marshal(pb1) - - // Now Unmarshal it to the wrong type. 
- pb2 := initGoTestField() - err := o.Unmarshal(pb2) - if err == nil { - t.Error("expected error, got no error") - } else if !strings.Contains(err.Error(), "bad wiretype") { - t.Error("expected bad wiretype error, got", err) - } -} - -func encodeDecode(t *testing.T, in, out Message, msg string) { - buf, err := Marshal(in) - if err != nil { - t.Fatalf("failed marshaling %v: %v", msg, err) - } - if err := Unmarshal(buf, out); err != nil { - t.Fatalf("failed unmarshaling %v: %v", msg, err) - } -} - -func TestPackedNonPackedDecoderSwitching(t *testing.T) { - np, p := new(NonPackedTest), new(PackedTest) - - // non-packed -> packed - np.A = []int32{0, 1, 1, 2, 3, 5} - encodeDecode(t, np, p, "non-packed -> packed") - if !reflect.DeepEqual(np.A, p.B) { - t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B) - } - - // packed -> non-packed - np.Reset() - p.B = []int32{3, 1, 4, 1, 5, 9} - encodeDecode(t, p, np, "packed -> non-packed") - if !reflect.DeepEqual(p.B, np.A) { - t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A) - } -} - -func TestProto1RepeatedGroup(t *testing.T) { - pb := &MessageList{ - Message: []*MessageList_Message{ - { - Name: String("blah"), - Count: Int32(7), - }, - // NOTE: pb.Message[1] is a nil - nil, - }, - } - - o := old() - err := o.Marshal(pb) - if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") { - t.Fatalf("unexpected or no error when marshaling: %v", err) - } -} - -// Test that enums work. Checks for a bug introduced by making enums -// named types instead of int32: newInt32FromUint64 would crash with -// a type mismatch in reflect.PointTo. -func TestEnum(t *testing.T) { - pb := new(GoEnum) - pb.Foo = FOO_FOO1.Enum() - o := old() - if err := o.Marshal(pb); err != nil { - t.Fatal("error encoding enum:", err) - } - pb1 := new(GoEnum) - if err := o.Unmarshal(pb1); err != nil { - t.Fatal("error decoding enum:", err) - } - if *pb1.Foo != FOO_FOO1 { - t.Error("expected 7 but got ", *pb1.Foo) - } -} - -// Enum types have String methods. Check that enum fields can be printed. -// We don't care what the value actually is, just as long as it doesn't crash. -func TestPrintingNilEnumFields(t *testing.T) { - pb := new(GoEnum) - _ = fmt.Sprintf("%+v", pb) -} - -// Verify that absent required fields cause Marshal/Unmarshal to return errors. -func TestRequiredFieldEnforcement(t *testing.T) { - pb := new(GoTestField) - _, err := Marshal(pb) - if err == nil { - t.Error("marshal: expected error, got nil") - } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Label") { - t.Errorf("marshal: bad error type: %v", err) - } - - // A slightly sneaky, yet valid, proto. It encodes the same required field twice, - // so simply counting the required fields is insufficient. - // field 1, encoding 2, value "hi" - buf := []byte("\x0A\x02hi\x0A\x02hi") - err = Unmarshal(buf, pb) - if err == nil { - t.Error("unmarshal: expected error, got nil") - } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "{Unknown}") { - t.Errorf("unmarshal: bad error type: %v", err) - } -} - -// Verify that absent required fields in groups cause Marshal/Unmarshal to return errors. 
-func TestRequiredFieldEnforcementGroups(t *testing.T) { - pb := &GoTestRequiredGroupField{Group: &GoTestRequiredGroupField_Group{}} - if _, err := Marshal(pb); err == nil { - t.Error("marshal: expected error, got nil") - } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.Field") { - t.Errorf("marshal: bad error type: %v", err) - } - - buf := []byte{11, 12} - if err := Unmarshal(buf, pb); err == nil { - t.Error("unmarshal: expected error, got nil") - } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.{Unknown}") { - t.Errorf("unmarshal: bad error type: %v", err) - } -} - -func TestTypedNilMarshal(t *testing.T) { - // A typed nil should return ErrNil and not crash. - { - var m *GoEnum - if _, err := Marshal(m); err != ErrNil { - t.Errorf("Marshal(%#v): got %v, want ErrNil", m, err) - } - } - - { - m := &Communique{Union: &Communique_Msg{Msg: nil}} - if _, err := Marshal(m); err == nil || err == ErrNil { - t.Errorf("Marshal(%#v): got %v, want errOneofHasNil", m, err) - } - } -} - -// A type that implements the Marshaler interface, but is not nillable. -type nonNillableInt uint64 - -func (nni nonNillableInt) Marshal() ([]byte, error) { - return EncodeVarint(uint64(nni)), nil -} - -type NNIMessage struct { - nni nonNillableInt -} - -func (*NNIMessage) Reset() {} -func (*NNIMessage) String() string { return "" } -func (*NNIMessage) ProtoMessage() {} - -// A type that implements the Marshaler interface and is nillable. -type nillableMessage struct { - x uint64 -} - -func (nm *nillableMessage) Marshal() ([]byte, error) { - return EncodeVarint(nm.x), nil -} - -type NMMessage struct { - nm *nillableMessage -} - -func (*NMMessage) Reset() {} -func (*NMMessage) String() string { return "" } -func (*NMMessage) ProtoMessage() {} - -// Verify a type that uses the Marshaler interface, but has a nil pointer. -func TestNilMarshaler(t *testing.T) { - // Try a struct with a Marshaler field that is nil. - // It should be directly marshable. - nmm := new(NMMessage) - if _, err := Marshal(nmm); err != nil { - t.Error("unexpected error marshaling nmm: ", err) - } - - // Try a struct with a Marshaler field that is not nillable. - nnim := new(NNIMessage) - nnim.nni = 7 - var _ Marshaler = nnim.nni // verify it is truly a Marshaler - if _, err := Marshal(nnim); err != nil { - t.Error("unexpected error marshaling nnim: ", err) - } -} - -func TestAllSetDefaults(t *testing.T) { - // Exercise SetDefaults with all scalar field types. - m := &Defaults{ - // NaN != NaN, so override that here. - F_Nan: Float32(1.7), - } - expected := &Defaults{ - F_Bool: Bool(true), - F_Int32: Int32(32), - F_Int64: Int64(64), - F_Fixed32: Uint32(320), - F_Fixed64: Uint64(640), - F_Uint32: Uint32(3200), - F_Uint64: Uint64(6400), - F_Float: Float32(314159), - F_Double: Float64(271828), - F_String: String(`hello, "world!"` + "\n"), - F_Bytes: []byte("Bignose"), - F_Sint32: Int32(-32), - F_Sint64: Int64(-64), - F_Enum: Defaults_GREEN.Enum(), - F_Pinf: Float32(float32(math.Inf(1))), - F_Ninf: Float32(float32(math.Inf(-1))), - F_Nan: Float32(1.7), - StrZero: String(""), - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultsWithSetField(t *testing.T) { - // Check that a set value is not overridden. 
- m := &Defaults{ - F_Int32: Int32(12), - } - SetDefaults(m) - if v := m.GetF_Int32(); v != 12 { - t.Errorf("m.FInt32 = %v, want 12", v) - } -} - -func TestSetDefaultsWithSubMessage(t *testing.T) { - m := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("gopher"), - }, - } - expected := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("gopher"), - Port: Int32(4000), - }, - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { - m := &MyMessage{ - RepInner: []*InnerMessage{{}}, - } - expected := &MyMessage{ - RepInner: []*InnerMessage{{ - Port: Int32(4000), - }}, - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultWithRepeatedNonMessage(t *testing.T) { - m := &MyMessage{ - Pet: []string{"turtle", "wombat"}, - } - expected := Clone(m) - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestMaximumTagNumber(t *testing.T) { - m := &MaxTag{ - LastField: String("natural goat essence"), - } - buf, err := Marshal(m) - if err != nil { - t.Fatalf("proto.Marshal failed: %v", err) - } - m2 := new(MaxTag) - if err := Unmarshal(buf, m2); err != nil { - t.Fatalf("proto.Unmarshal failed: %v", err) - } - if got, want := m2.GetLastField(), *m.LastField; got != want { - t.Errorf("got %q, want %q", got, want) - } -} - -func TestJSON(t *testing.T) { - m := &MyMessage{ - Count: Int32(4), - Pet: []string{"bunny", "kitty"}, - Inner: &InnerMessage{ - Host: String("cauchy"), - }, - Bikeshed: MyMessage_GREEN.Enum(), - } - const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` - - b, err := json.Marshal(m) - if err != nil { - t.Fatalf("json.Marshal failed: %v", err) - } - s := string(b) - if s != expected { - t.Errorf("got %s\nwant %s", s, expected) - } - - received := new(MyMessage) - if err := json.Unmarshal(b, received); err != nil { - t.Fatalf("json.Unmarshal failed: %v", err) - } - if !Equal(received, m) { - t.Fatalf("got %s, want %s", received, m) - } - - // Test unmarshalling of JSON with symbolic enum name. - const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` - received.Reset() - if err := json.Unmarshal([]byte(old), received); err != nil { - t.Fatalf("json.Unmarshal failed: %v", err) - } - if !Equal(received, m) { - t.Fatalf("got %s, want %s", received, m) - } -} - -func TestBadWireType(t *testing.T) { - b := []byte{7<<3 | 6} // field 7, wire type 6 - pb := new(OtherMessage) - if err := Unmarshal(b, pb); err == nil { - t.Errorf("Unmarshal did not fail") - } else if !strings.Contains(err.Error(), "unknown wire type") { - t.Errorf("wrong error: %v", err) - } -} - -func TestBytesWithInvalidLength(t *testing.T) { - // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. - b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} - Unmarshal(b, new(MyMessage)) -} - -func TestLengthOverflow(t *testing.T) { - // Overflowing a length should not panic. - b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} - Unmarshal(b, new(MyMessage)) -} - -func TestVarintOverflow(t *testing.T) { - // Overflowing a 64-bit length should not be allowed. 
- b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} - if err := Unmarshal(b, new(MyMessage)); err == nil { - t.Fatalf("Overflowed uint64 length without error") - } -} - -func TestUnmarshalFuzz(t *testing.T) { - const N = 1000 - seed := time.Now().UnixNano() - t.Logf("RNG seed is %d", seed) - rng := rand.New(rand.NewSource(seed)) - buf := make([]byte, 20) - for i := 0; i < N; i++ { - for j := range buf { - buf[j] = byte(rng.Intn(256)) - } - fuzzUnmarshal(t, buf) - } -} - -func TestMergeMessages(t *testing.T) { - pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} - data, err := Marshal(pb) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - pb1 := new(MessageList) - if err := Unmarshal(data, pb1); err != nil { - t.Fatalf("first Unmarshal: %v", err) - } - if err := Unmarshal(data, pb1); err != nil { - t.Fatalf("second Unmarshal: %v", err) - } - if len(pb1.Message) != 1 { - t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) - } - - pb2 := new(MessageList) - if err := UnmarshalMerge(data, pb2); err != nil { - t.Fatalf("first UnmarshalMerge: %v", err) - } - if err := UnmarshalMerge(data, pb2); err != nil { - t.Fatalf("second UnmarshalMerge: %v", err) - } - if len(pb2.Message) != 2 { - t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) - } -} - -func TestExtensionMarshalOrder(t *testing.T) { - m := &MyMessage{Count: Int(123)} - if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { - t.Fatalf("SetExtension: %v", err) - } - if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { - t.Fatalf("SetExtension: %v", err) - } - if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil { - t.Fatalf("SetExtension: %v", err) - } - - // Serialize m several times, and check we get the same bytes each time. - var orig []byte - for i := 0; i < 100; i++ { - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if i == 0 { - orig = b - continue - } - if !bytes.Equal(b, orig) { - t.Errorf("Bytes differ on attempt #%d", i) - } - } -} - -// Many extensions, because small maps might not iterate differently on each iteration. -var exts = []*ExtensionDesc{ - E_X201, - E_X202, - E_X203, - E_X204, - E_X205, - E_X206, - E_X207, - E_X208, - E_X209, - E_X210, - E_X211, - E_X212, - E_X213, - E_X214, - E_X215, - E_X216, - E_X217, - E_X218, - E_X219, - E_X220, - E_X221, - E_X222, - E_X223, - E_X224, - E_X225, - E_X226, - E_X227, - E_X228, - E_X229, - E_X230, - E_X231, - E_X232, - E_X233, - E_X234, - E_X235, - E_X236, - E_X237, - E_X238, - E_X239, - E_X240, - E_X241, - E_X242, - E_X243, - E_X244, - E_X245, - E_X246, - E_X247, - E_X248, - E_X249, - E_X250, -} - -func TestMessageSetMarshalOrder(t *testing.T) { - m := &MyMessageSet{} - for _, x := range exts { - if err := SetExtension(m, x, &Empty{}); err != nil { - t.Fatalf("SetExtension: %v", err) - } - } - - buf, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - // Serialize m several times, and check we get the same bytes each time. 
- for i := 0; i < 10; i++ { - b1, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if !bytes.Equal(b1, buf) { - t.Errorf("Bytes differ on re-Marshal #%d", i) - } - - m2 := &MyMessageSet{} - if err = Unmarshal(buf, m2); err != nil { - t.Errorf("Unmarshal: %v", err) - } - b2, err := Marshal(m2) - if err != nil { - t.Errorf("re-Marshal: %v", err) - } - if !bytes.Equal(b2, buf) { - t.Errorf("Bytes differ on round-trip #%d", i) - } - } -} - -func TestUnmarshalMergesMessages(t *testing.T) { - // If a nested message occurs twice in the input, - // the fields should be merged when decoding. - a := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("polhode"), - Port: Int32(1234), - }, - } - aData, err := Marshal(a) - if err != nil { - t.Fatalf("Marshal(a): %v", err) - } - b := &OtherMessage{ - Weight: Float32(1.2), - Inner: &InnerMessage{ - Host: String("herpolhode"), - Connected: Bool(true), - }, - } - bData, err := Marshal(b) - if err != nil { - t.Fatalf("Marshal(b): %v", err) - } - want := &OtherMessage{ - Key: Int64(123), - Weight: Float32(1.2), - Inner: &InnerMessage{ - Host: String("herpolhode"), - Port: Int32(1234), - Connected: Bool(true), - }, - } - got := new(OtherMessage) - if err := Unmarshal(append(aData, bData...), got); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if !Equal(got, want) { - t.Errorf("\n got %v\nwant %v", got, want) - } -} - -func TestEncodingSizes(t *testing.T) { - tests := []struct { - m Message - n int - }{ - {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, - {&Defaults{F_Int32: Int32(math.MinInt32)}, 11}, - {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6}, - {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, - } - for _, test := range tests { - b, err := Marshal(test.m) - if err != nil { - t.Errorf("Marshal(%v): %v", test.m, err) - continue - } - if len(b) != test.n { - t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) - } - } -} - -func TestRequiredNotSetError(t *testing.T) { - pb := initGoTest(false) - pb.RequiredField.Label = nil - pb.F_Int32Required = nil - pb.F_Int64Required = nil - - expected := "0807" + // field 1, encoding 0, value 7 - "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) - "5001" + // field 10, encoding 0, value 1 - "6d20000000" + // field 13, encoding 5, value 0x20 - "714000000000000000" + // field 14, encoding 1, value 0x40 - "78a019" + // field 15, encoding 0, value 0xca0 = 3232 - "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 - "8d0100004a45" + // field 17, encoding 5, value 3232.0 - "9101000000000040b940" + // field 18, encoding 1, value 6464.0 - "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" - "b304" + // field 70, encoding 3, start group - "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required" - "b404" + // field 70, encoding 4, end group - "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" - "b0063f" + // field 102, encoding 0, 0x3f zigzag32 - "b8067f" // field 103, encoding 0, 0x7f zigzag64 - - o := old() - mbytes, err := Marshal(pb) - if _, ok := err.(*RequiredNotSetError); !ok { - fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", mbytes) - t.Fatalf("expected = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.Label") < 0 { - t.Errorf("marshal-1 wrong err msg: %v", err) - } - if !equal(mbytes, expected, t) { - o.DebugPrint("neq 1", mbytes) - t.Fatalf("expected = %s", expected) - } - - // Now test Unmarshal by 
recreating the original buffer. - pbd := new(GoTest) - err = Unmarshal(mbytes, pbd) - if _, ok := err.(*RequiredNotSetError); !ok { - t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", mbytes) - t.Fatalf("string = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 { - t.Errorf("unmarshal wrong err msg: %v", err) - } - mbytes, err = Marshal(pbd) - if _, ok := err.(*RequiredNotSetError); !ok { - t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", mbytes) - t.Fatalf("string = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.Label") < 0 { - t.Errorf("marshal-2 wrong err msg: %v", err) - } - if !equal(mbytes, expected, t) { - o.DebugPrint("neq 2", mbytes) - t.Fatalf("string = %s", expected) - } -} - -func fuzzUnmarshal(t *testing.T, data []byte) { - defer func() { - if e := recover(); e != nil { - t.Errorf("These bytes caused a panic: %+v", data) - t.Logf("Stack:\n%s", debug.Stack()) - t.FailNow() - } - }() - - pb := new(MyMessage) - Unmarshal(data, pb) -} - -func TestMapFieldMarshal(t *testing.T) { - m := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Rob", - 4: "Ian", - 8: "Dave", - }, - } - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - // b should be the concatenation of these three byte sequences in some order. - parts := []string{ - "\n\a\b\x01\x12\x03Rob", - "\n\a\b\x04\x12\x03Ian", - "\n\b\b\x08\x12\x04Dave", - } - ok := false - for i := range parts { - for j := range parts { - if j == i { - continue - } - for k := range parts { - if k == i || k == j { - continue - } - try := parts[i] + parts[j] + parts[k] - if bytes.Equal(b, []byte(try)) { - ok = true - break - } - } - } - } - if !ok { - t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2]) - } - t.Logf("FYI b: %q", b) - - (new(Buffer)).DebugPrint("Dump of b", b) -} - -func TestMapFieldRoundTrips(t *testing.T) { - m := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Rob", - 4: "Ian", - 8: "Dave", - }, - MsgMapping: map[int64]*FloatingPoint{ - 0x7001: {F: Float64(2.0)}, - }, - ByteMapping: map[bool][]byte{ - false: []byte("that's not right!"), - true: []byte("aye, 'tis true!"), - }, - } - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - t.Logf("FYI b: %q", b) - m2 := new(MessageWithMap) - if err := Unmarshal(b, m2); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - for _, pair := range [][2]interface{}{ - {m.NameMapping, m2.NameMapping}, - {m.MsgMapping, m2.MsgMapping}, - {m.ByteMapping, m2.ByteMapping}, - } { - if !reflect.DeepEqual(pair[0], pair[1]) { - t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1]) - } - } -} - -func TestMapFieldWithNil(t *testing.T) { - m1 := &MessageWithMap{ - MsgMapping: map[int64]*FloatingPoint{ - 1: nil, - }, - } - b, err := Marshal(m1) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - m2 := new(MessageWithMap) - if err := Unmarshal(b, m2); err != nil { - t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b) - } - if v, ok := m2.MsgMapping[1]; !ok { - t.Error("msg_mapping[1] not present") - } else if v != nil { - t.Errorf("msg_mapping[1] not nil: %v", v) - } -} - -func TestMapFieldWithNilBytes(t *testing.T) { - m1 := &MessageWithMap{ - ByteMapping: map[bool][]byte{ - false: {}, - true: nil, - }, - } - n := Size(m1) - b, err := Marshal(m1) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if n != len(b) { - 
t.Errorf("Size(m1) = %d; want len(Marshal(m1)) = %d", n, len(b)) - } - m2 := new(MessageWithMap) - if err := Unmarshal(b, m2); err != nil { - t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b) - } - if v, ok := m2.ByteMapping[false]; !ok { - t.Error("byte_mapping[false] not present") - } else if len(v) != 0 { - t.Errorf("byte_mapping[false] not empty: %#v", v) - } - if v, ok := m2.ByteMapping[true]; !ok { - t.Error("byte_mapping[true] not present") - } else if len(v) != 0 { - t.Errorf("byte_mapping[true] not empty: %#v", v) - } -} - -func TestDecodeMapFieldMissingKey(t *testing.T) { - b := []byte{ - 0x0A, 0x03, // message, tag 1 (name_mapping), of length 3 bytes - // no key - 0x12, 0x01, 0x6D, // string value of length 1 byte, value "m" - } - got := &MessageWithMap{} - err := Unmarshal(b, got) - if err != nil { - t.Fatalf("failed to marshal map with missing key: %v", err) - } - want := &MessageWithMap{NameMapping: map[int32]string{0: "m"}} - if !Equal(got, want) { - t.Errorf("Unmarshaled map with no key was not as expected. got: %v, want %v", got, want) - } -} - -func TestDecodeMapFieldMissingValue(t *testing.T) { - b := []byte{ - 0x0A, 0x02, // message, tag 1 (name_mapping), of length 2 bytes - 0x08, 0x01, // varint key, value 1 - // no value - } - got := &MessageWithMap{} - err := Unmarshal(b, got) - if err != nil { - t.Fatalf("failed to marshal map with missing value: %v", err) - } - want := &MessageWithMap{NameMapping: map[int32]string{1: ""}} - if !Equal(got, want) { - t.Errorf("Unmarshaled map with no value was not as expected. got: %v, want %v", got, want) - } -} - -func TestOneof(t *testing.T) { - m := &Communique{} - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal of empty message with oneof: %v", err) - } - if len(b) != 0 { - t.Errorf("Marshal of empty message yielded too many bytes: %v", b) - } - - m = &Communique{ - Union: &Communique_Name{Name: "Barry"}, - } - - // Round-trip. - b, err = Marshal(m) - if err != nil { - t.Fatalf("Marshal of message with oneof: %v", err) - } - if len(b) != 7 { // name tag/wire (1) + name len (1) + name (5) - t.Errorf("Incorrect marshal of message with oneof: %v", b) - } - m.Reset() - if err = Unmarshal(b, m); err != nil { - t.Fatalf("Unmarshal of message with oneof: %v", err) - } - if x, ok := m.Union.(*Communique_Name); !ok || x.Name != "Barry" { - t.Errorf("After round trip, Union = %+v", m.Union) - } - if name := m.GetName(); name != "Barry" { - t.Errorf("After round trip, GetName = %q, want %q", name, "Barry") - } - - // Let's try with a message in the oneof. - m.Union = &Communique_Msg{Msg: &Strings{StringField: String("deep deep string")}} - b, err = Marshal(m) - if err != nil { - t.Fatalf("Marshal of message with oneof set to message: %v", err) - } - if len(b) != 20 { // msg tag/wire (1) + msg len (1) + msg (1 + 1 + 16) - t.Errorf("Incorrect marshal of message with oneof set to message: %v", b) - } - m.Reset() - if err := Unmarshal(b, m); err != nil { - t.Fatalf("Unmarshal of message with oneof set to message: %v", err) - } - ss, ok := m.Union.(*Communique_Msg) - if !ok || ss.Msg.GetStringField() != "deep deep string" { - t.Errorf("After round trip with oneof set to message, Union = %+v", m.Union) - } -} - -func TestInefficientPackedBool(t *testing.T) { - // https://github.com/golang/protobuf/issues/76 - inp := []byte{ - 0x12, 0x02, // 0x12 = 2<<3|2; 2 bytes - // Usually a bool should take a single byte, - // but it is permitted to be any varint. 
- 0xb9, 0x30, - } - if err := Unmarshal(inp, new(MoreRepeated)); err != nil { - t.Error(err) - } -} - -// Benchmarks - -func testMsg() *GoTest { - pb := initGoTest(true) - const N = 1000 // Internally the library starts much smaller. - pb.F_Int32Repeated = make([]int32, N) - pb.F_DoubleRepeated = make([]float64, N) - for i := 0; i < N; i++ { - pb.F_Int32Repeated[i] = int32(i) - pb.F_DoubleRepeated[i] = float64(i) - } - return pb -} - -func bytesMsg() *GoTest { - pb := initGoTest(true) - buf := make([]byte, 4000) - for i := range buf { - buf[i] = byte(i) - } - pb.F_BytesDefaulted = buf - return pb -} - -func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { - d, _ := marshal(pb) - b.SetBytes(int64(len(d))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - marshal(pb) - } -} - -func benchmarkBufferMarshal(b *testing.B, pb Message) { - p := NewBuffer(nil) - benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { - p.Reset() - err := p.Marshal(pb0) - return p.Bytes(), err - }) -} - -func benchmarkSize(b *testing.B, pb Message) { - benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { - Size(pb) - return nil, nil - }) -} - -func newOf(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - return reflect.New(in.Type().Elem()).Interface().(Message) -} - -func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { - d, _ := Marshal(pb) - b.SetBytes(int64(len(d))) - pbd := newOf(pb) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - unmarshal(d, pbd) - } -} - -func benchmarkBufferUnmarshal(b *testing.B, pb Message) { - p := NewBuffer(nil) - benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { - p.SetBuf(d) - return p.Unmarshal(pb0) - }) -} - -// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} - -func BenchmarkMarshal(b *testing.B) { - benchmarkMarshal(b, testMsg(), Marshal) -} - -func BenchmarkBufferMarshal(b *testing.B) { - benchmarkBufferMarshal(b, testMsg()) -} - -func BenchmarkSize(b *testing.B) { - benchmarkSize(b, testMsg()) -} - -func BenchmarkUnmarshal(b *testing.B) { - benchmarkUnmarshal(b, testMsg(), Unmarshal) -} - -func BenchmarkBufferUnmarshal(b *testing.B) { - benchmarkBufferUnmarshal(b, testMsg()) -} - -func BenchmarkMarshalBytes(b *testing.B) { - benchmarkMarshal(b, bytesMsg(), Marshal) -} - -func BenchmarkBufferMarshalBytes(b *testing.B) { - benchmarkBufferMarshal(b, bytesMsg()) -} - -func BenchmarkSizeBytes(b *testing.B) { - benchmarkSize(b, bytesMsg()) -} - -func BenchmarkUnmarshalBytes(b *testing.B) { - benchmarkUnmarshal(b, bytesMsg(), Unmarshal) -} - -func BenchmarkBufferUnmarshalBytes(b *testing.B) { - benchmarkBufferUnmarshal(b, bytesMsg()) -} - -func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { - b.StopTimer() - pb := initGoTestField() - skip := &GoSkipTest{ - SkipInt32: Int32(32), - SkipFixed32: Uint32(3232), - SkipFixed64: Uint64(6464), - SkipString: String("skipper"), - Skipgroup: &GoSkipTest_SkipGroup{ - GroupInt32: Int32(75), - GroupString: String("wxyz"), - }, - } - - pbd := new(GoTestField) - p := NewBuffer(nil) - p.Marshal(pb) - p.Marshal(skip) - p2 := NewBuffer(nil) - - b.StartTimer() - for i := 0; i < b.N; i++ { - p2.SetBuf(p.Bytes()) - p2.Unmarshal(pbd) - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/any_test.go b/vendor/github.com/gogo/protobuf/proto/any_test.go deleted file mode 100644 index f098d828..00000000 --- a/vendor/github.com/gogo/protobuf/proto/any_test.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go 
support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "strings" - "testing" - - "github.com/gogo/protobuf/proto" - - pb "github.com/gogo/protobuf/proto/proto3_proto" - testpb "github.com/gogo/protobuf/proto/testdata" - "github.com/gogo/protobuf/types" -) - -var ( - expandedMarshaler = proto.TextMarshaler{ExpandAny: true} - expandedCompactMarshaler = proto.TextMarshaler{Compact: true, ExpandAny: true} -) - -// anyEqual reports whether two messages which may be google.protobuf.Any or may -// contain google.protobuf.Any fields are equal. We can't use proto.Equal for -// comparison, because semantically equivalent messages may be marshaled to -// binary in different tag order. Instead, trust that TextMarshaler with -// ExpandAny option works and compare the text marshaling results. -func anyEqual(got, want proto.Message) bool { - // if messages are proto.Equal, no need to marshal. 
- if proto.Equal(got, want) { - return true - } - g := expandedMarshaler.Text(got) - w := expandedMarshaler.Text(want) - return g == w -} - -type golden struct { - m proto.Message - t, c string -} - -var goldenMessages = makeGolden() - -func makeGolden() []golden { - nested := &pb.Nested{Bunny: "Monty"} - nb, err := proto.Marshal(nested) - if err != nil { - panic(err) - } - m1 := &pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &types.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb}, - } - m2 := &pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &types.Any{TypeUrl: "http://[::1]/type.googleapis.com/" + proto.MessageName(nested), Value: nb}, - } - m3 := &pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &types.Any{TypeUrl: `type.googleapis.com/"/` + proto.MessageName(nested), Value: nb}, - } - m4 := &pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &types.Any{TypeUrl: "type.googleapis.com/a/path/" + proto.MessageName(nested), Value: nb}, - } - m5 := &types.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb} - - any1 := &testpb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")} - proto.SetExtension(any1, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("foo")}) - proto.SetExtension(any1, testpb.E_Ext_Text, proto.String("bar")) - any1b, err := proto.Marshal(any1) - if err != nil { - panic(err) - } - any2 := &testpb.MyMessage{Count: proto.Int32(42), Bikeshed: testpb.MyMessage_GREEN.Enum(), RepBytes: [][]byte{[]byte("roboto")}} - proto.SetExtension(any2, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("baz")}) - any2b, err := proto.Marshal(any2) - if err != nil { - panic(err) - } - m6 := &pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &types.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, - ManyThings: []*types.Any{ - {TypeUrl: "type.googleapis.com/" + proto.MessageName(any2), Value: any2b}, - {TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, - }, - } - - const ( - m1Golden = ` -name: "David" -result_count: 47 -anything: < - [type.googleapis.com/proto3_proto.Nested]: < - bunny: "Monty" - > -> -` - m2Golden = ` -name: "David" -result_count: 47 -anything: < - ["http://[::1]/type.googleapis.com/proto3_proto.Nested"]: < - bunny: "Monty" - > -> -` - m3Golden = ` -name: "David" -result_count: 47 -anything: < - ["type.googleapis.com/\"/proto3_proto.Nested"]: < - bunny: "Monty" - > -> -` - m4Golden = ` -name: "David" -result_count: 47 -anything: < - [type.googleapis.com/a/path/proto3_proto.Nested]: < - bunny: "Monty" - > -> -` - m5Golden = ` -[type.googleapis.com/proto3_proto.Nested]: < - bunny: "Monty" -> -` - m6Golden = ` -name: "David" -result_count: 47 -anything: < - [type.googleapis.com/testdata.MyMessage]: < - count: 47 - name: "David" - [testdata.Ext.more]: < - data: "foo" - > - [testdata.Ext.text]: "bar" - > -> -many_things: < - [type.googleapis.com/testdata.MyMessage]: < - count: 42 - bikeshed: GREEN - rep_bytes: "roboto" - [testdata.Ext.more]: < - data: "baz" - > - > -> -many_things: < - [type.googleapis.com/testdata.MyMessage]: < - count: 47 - name: "David" - [testdata.Ext.more]: < - data: "foo" - > - [testdata.Ext.text]: "bar" - > -> -` - ) - return []golden{ - {m1, strings.TrimSpace(m1Golden) + "\n", strings.TrimSpace(compact(m1Golden)) + " "}, - {m2, strings.TrimSpace(m2Golden) + "\n", strings.TrimSpace(compact(m2Golden)) + " "}, - {m3, strings.TrimSpace(m3Golden) + "\n", strings.TrimSpace(compact(m3Golden)) + " 
"}, - {m4, strings.TrimSpace(m4Golden) + "\n", strings.TrimSpace(compact(m4Golden)) + " "}, - {m5, strings.TrimSpace(m5Golden) + "\n", strings.TrimSpace(compact(m5Golden)) + " "}, - {m6, strings.TrimSpace(m6Golden) + "\n", strings.TrimSpace(compact(m6Golden)) + " "}, - } -} - -func TestMarshalGolden(t *testing.T) { - for _, tt := range goldenMessages { - if got, want := expandedMarshaler.Text(tt.m), tt.t; got != want { - t.Errorf("message %v: got:\n%s\nwant:\n%s", tt.m, got, want) - } - if got, want := expandedCompactMarshaler.Text(tt.m), tt.c; got != want { - t.Errorf("message %v: got:\n`%s`\nwant:\n`%s`", tt.m, got, want) - } - } -} - -func TestUnmarshalGolden(t *testing.T) { - for _, tt := range goldenMessages { - want := tt.m - got := proto.Clone(tt.m) - got.Reset() - if err := proto.UnmarshalText(tt.t, got); err != nil { - t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.t, err) - } - if !anyEqual(got, want) { - t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.t, got, want) - } - got.Reset() - if err := proto.UnmarshalText(tt.c, got); err != nil { - t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.c, err) - } - if !anyEqual(got, want) { - t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.c, got, want) - } - } -} - -func TestMarshalUnknownAny(t *testing.T) { - m := &pb.Message{ - Anything: &types.Any{ - TypeUrl: "foo", - Value: []byte("bar"), - }, - } - want := `anything: < - type_url: "foo" - value: "bar" -> -` - got := expandedMarshaler.Text(m) - if got != want { - t.Errorf("got\n`%s`\nwant\n`%s`", got, want) - } -} - -func TestAmbiguousAny(t *testing.T) { - pb := &types.Any{} - err := proto.UnmarshalText(` - type_url: "ttt/proto3_proto.Nested" - value: "\n\x05Monty" - `, pb) - t.Logf("result: %v (error: %v)", expandedMarshaler.Text(pb), err) - if err != nil { - t.Errorf("failed to parse ambiguous Any message: %v", err) - } -} - -func TestUnmarshalOverwriteAny(t *testing.T) { - pb := &types.Any{} - err := proto.UnmarshalText(` - [type.googleapis.com/a/path/proto3_proto.Nested]: < - bunny: "Monty" - > - [type.googleapis.com/a/path/proto3_proto.Nested]: < - bunny: "Rabbit of Caerbannog" - > - `, pb) - want := `line 7: Any message unpacked multiple times, or "type_url" already set` - if err.Error() != want { - t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want) - } -} - -func TestUnmarshalAnyMixAndMatch(t *testing.T) { - pb := &types.Any{} - err := proto.UnmarshalText(` - value: "\n\x05Monty" - [type.googleapis.com/a/path/proto3_proto.Nested]: < - bunny: "Rabbit of Caerbannog" - > - `, pb) - want := `line 5: Any message unpacked multiple times, or "value" already set` - if err.Error() != want { - t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want) - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go index 5d4cba4b..a26b046d 100644 --- a/vendor/github.com/gogo/protobuf/proto/clone.go +++ b/vendor/github.com/gogo/protobuf/proto/clone.go @@ -35,22 +35,39 @@ package proto import ( + "fmt" "log" "reflect" "strings" ) // Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) +func Clone(src Message) Message { + in := reflect.ValueOf(src) if in.IsNil() { - return pb + return src } - out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. 
- mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generate Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) } // Merge merges src into dst. @@ -58,17 +75,24 @@ func Clone(pb Message) Message { // Elements of repeated fields will be appended. // Merge panics if src and dst are not the same type, or if dst is nil. func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + in := reflect.ValueOf(src) out := reflect.ValueOf(dst) if out.IsNil() { panic("proto: nil destination") } if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) } if in.IsNil() { - // Merging nil into non-nil is a quiet no-op + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) return } mergeStruct(out.Elem(), in.Elem()) @@ -89,7 +113,7 @@ func mergeStruct(out, in reflect.Value) { bIn := emIn.GetExtensions() bOut := emOut.GetExtensions() *bOut = append(*bOut, *bIn...) - } else if emIn, ok := extendable(in.Addr().Interface()); ok { + } else if emIn, err := extendable(in.Addr().Interface()); err == nil { emOut, _ := extendable(out.Addr().Interface()) mIn, muIn := emIn.extensionsRead() if mIn != nil { diff --git a/vendor/github.com/gogo/protobuf/proto/clone_test.go b/vendor/github.com/gogo/protobuf/proto/clone_test.go deleted file mode 100644 index 1a16eb55..00000000 --- a/vendor/github.com/gogo/protobuf/proto/clone_test.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "testing" - - "github.com/gogo/protobuf/proto" - - proto3pb "github.com/gogo/protobuf/proto/proto3_proto" - pb "github.com/gogo/protobuf/proto/testdata" -) - -var cloneTestMessage = &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &pb.InnerMessage{ - Host: proto.String("niles"), - Port: proto.Int32(9099), - Connected: proto.Bool(true), - }, - Others: []*pb.OtherMessage{ - { - Value: []byte("some bytes"), - }, - }, - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, -} - -func init() { - ext := &pb.Ext{ - Data: proto.String("extension"), - } - if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil { - panic("SetExtension: " + err.Error()) - } -} - -func TestClone(t *testing.T) { - m := proto.Clone(cloneTestMessage).(*pb.MyMessage) - if !proto.Equal(m, cloneTestMessage) { - t.Errorf("Clone(%v) = %v", cloneTestMessage, m) - } - - // Verify it was a deep copy. - *m.Inner.Port++ - if proto.Equal(m, cloneTestMessage) { - t.Error("Mutating clone changed the original") - } - // Byte fields and repeated fields should be copied. 
- if &m.Pet[0] == &cloneTestMessage.Pet[0] { - t.Error("Pet: repeated field not copied") - } - if &m.Others[0] == &cloneTestMessage.Others[0] { - t.Error("Others: repeated field not copied") - } - if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] { - t.Error("Others[0].Value: bytes field not copied") - } - if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] { - t.Error("RepBytes: repeated field not copied") - } - if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] { - t.Error("RepBytes[0]: bytes field not copied") - } -} - -func TestCloneNil(t *testing.T) { - var m *pb.MyMessage - if c := proto.Clone(m); !proto.Equal(m, c) { - t.Errorf("Clone(%v) = %v", m, c) - } -} - -var mergeTests = []struct { - src, dst, want proto.Message -}{ - { - src: &pb.MyMessage{ - Count: proto.Int32(42), - }, - dst: &pb.MyMessage{ - Name: proto.String("Dave"), - }, - want: &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - }, - }, - { - src: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("hey"), - Connected: proto.Bool(true), - }, - Pet: []string{"horsey"}, - Others: []*pb.OtherMessage{ - { - Value: []byte("some bytes"), - }, - }, - }, - dst: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("niles"), - Port: proto.Int32(9099), - }, - Pet: []string{"bunny", "kitty"}, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(31415926535), - }, - { - // Explicitly test a src=nil field - Inner: nil, - }, - }, - }, - want: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("hey"), - Connected: proto.Bool(true), - Port: proto.Int32(9099), - }, - Pet: []string{"bunny", "kitty", "horsey"}, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(31415926535), - }, - {}, - { - Value: []byte("some bytes"), - }, - }, - }, - }, - { - src: &pb.MyMessage{ - RepBytes: [][]byte{[]byte("wow")}, - }, - dst: &pb.MyMessage{ - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham")}, - }, - want: &pb.MyMessage{ - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, - }, - }, - // Check that a scalar bytes field replaces rather than appends. - { - src: &pb.OtherMessage{Value: []byte("foo")}, - dst: &pb.OtherMessage{Value: []byte("bar")}, - want: &pb.OtherMessage{Value: []byte("foo")}, - }, - { - src: &pb.MessageWithMap{ - NameMapping: map[int32]string{6: "Nigel"}, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4001: {F: proto.Float64(2.0)}, - 0x4002: { - F: proto.Float64(2.0), - }, - }, - ByteMapping: map[bool][]byte{true: []byte("wowsa")}, - }, - dst: &pb.MessageWithMap{ - NameMapping: map[int32]string{ - 6: "Bruce", // should be overwritten - 7: "Andrew", - }, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4002: { - F: proto.Float64(3.0), - Exact: proto.Bool(true), - }, // the entire message should be overwritten - }, - }, - want: &pb.MessageWithMap{ - NameMapping: map[int32]string{ - 6: "Nigel", - 7: "Andrew", - }, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4001: {F: proto.Float64(2.0)}, - 0x4002: { - F: proto.Float64(2.0), - }, - }, - ByteMapping: map[bool][]byte{true: []byte("wowsa")}, - }, - }, - // proto3 shouldn't merge zero values, - // in the same way that proto2 shouldn't merge nils. 
- { - src: &proto3pb.Message{ - Name: "Aaron", - Data: []byte(""), // zero value, but not nil - }, - dst: &proto3pb.Message{ - HeightInCm: 176, - Data: []byte("texas!"), - }, - want: &proto3pb.Message{ - Name: "Aaron", - HeightInCm: 176, - Data: []byte("texas!"), - }, - }, - // Oneof fields should merge by assignment. - { - src: &pb.Communique{ - Union: &pb.Communique_Number{Number: 41}, - }, - dst: &pb.Communique{ - Union: &pb.Communique_Name{Name: "Bobby Tables"}, - }, - want: &pb.Communique{ - Union: &pb.Communique_Number{Number: 41}, - }, - }, - // Oneof nil is the same as not set. - { - src: &pb.Communique{}, - dst: &pb.Communique{ - Union: &pb.Communique_Name{Name: "Bobby Tables"}, - }, - want: &pb.Communique{ - Union: &pb.Communique_Name{Name: "Bobby Tables"}, - }, - }, - { - src: &proto3pb.Message{ - Terrain: map[string]*proto3pb.Nested{ - "kay_a": {Cute: true}, // replace - "kay_b": {Bunny: "rabbit"}, // insert - }, - }, - dst: &proto3pb.Message{ - Terrain: map[string]*proto3pb.Nested{ - "kay_a": {Bunny: "lost"}, // replaced - "kay_c": {Bunny: "bunny"}, // keep - }, - }, - want: &proto3pb.Message{ - Terrain: map[string]*proto3pb.Nested{ - "kay_a": {Cute: true}, - "kay_b": {Bunny: "rabbit"}, - "kay_c": {Bunny: "bunny"}, - }, - }, - }, -} - -func TestMerge(t *testing.T) { - for _, m := range mergeTests { - got := proto.Clone(m.dst) - proto.Merge(got, m.src) - if !proto.Equal(got, m.want) { - t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) - } - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/size2_test.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go similarity index 56% rename from vendor/github.com/gogo/protobuf/proto/size2_test.go rename to vendor/github.com/gogo/protobuf/proto/custom_gogo.go index a2729c39..24552483 100644 --- a/vendor/github.com/gogo/protobuf/proto/size2_test.go +++ b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go @@ -1,7 +1,7 @@ -// Go support for Protocol Buffers - Google's data interchange format +// Protocol Buffers for Go with Gadgets // -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -13,9 +13,6 @@ // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -31,33 +28,12 @@ package proto -import ( - "testing" -) +import "reflect" -// This is a separate file and package from size_test.go because that one uses -// generated messages and thus may not be in package proto without having a circular -// dependency, whereas this file tests unexported details of size.go. - -func TestVarintSize(t *testing.T) { - // Check the edge cases carefully. 
- testCases := []struct { - n uint64 - size int - }{ - {0, 1}, - {1, 1}, - {127, 1}, - {128, 2}, - {16383, 2}, - {16384, 3}, - {1<<63 - 1, 9}, - {1 << 63, 10}, - } - for _, tc := range testCases { - size := sizeVarint(tc.n) - if size != tc.size { - t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size) - } - } +type custom interface { + Marshal() ([]byte, error) + Unmarshal(data []byte) error + Size() int } + +var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go index 737f2731..d9aa3c42 100644 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -39,8 +39,6 @@ import ( "errors" "fmt" "io" - "os" - "reflect" ) // errOverflow is returned when an integer is too large to be represented. @@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow") // wire type is encountered. It does not get returned to user code. var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - // DecodeVarint reads a varint-encoded integer from the slice. // It returns the integer and the number of bytes consumed, or // zero if there is not enough. @@ -267,9 +261,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) { return } -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - // DecodeRawBytes reads a count-delimited byte buffer from the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -311,81 +302,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) { return string(buf), nil } -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - // Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. 
The argument points to data that may be +// unmarshal themselves. The argument points to data that may be // overwritten, so implementations should not keep references to the // buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. type Unmarshaler interface { Unmarshal([]byte) error } +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + // Unmarshal parses the protocol buffer representation in buf and places the // decoded result in pb. If the struct underlying pb does not match // the data in buf, the results can be unpredictable. @@ -395,7 +334,13 @@ type Unmarshaler interface { // to preserve and append to existing data. func Unmarshal(buf []byte, pb Message) error { pb.Reset() - return UnmarshalMerge(buf, pb) + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) } // UnmarshalMerge parses the protocol buffer representation in buf and @@ -405,8 +350,16 @@ func Unmarshal(buf []byte, pb Message) error { // UnmarshalMerge merges into existing data in pb. // Most code should use Unmarshal instead. func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 return u.Unmarshal(buf) } return NewBuffer(buf).Unmarshal(pb) @@ -422,12 +375,17 @@ func (p *Buffer) DecodeMessage(pb Message) error { } // DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) + err := Unmarshal(b[:x], pb) + p.index += y + return err } // Unmarshal parses the protocol buffer representation in the @@ -438,541 +396,33 @@ func (p *Buffer) DecodeGroup(pb Message) error { // Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. 
- if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) p.index = len(p.buf) return err } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. -func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e, eok := structPointer_Interface(base, st).(extensionsBytes); eok { - if isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - ext := e.GetExtensions() - *ext = append(*ext, o.buf[oi:o.index]...) - } - continue - } - } else if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - extmap[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. - err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. 
- // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. -func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. 
-func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). -func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. -func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. -func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. 
- keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. - for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. - tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. -func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). 
-func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) return err } - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) return err } diff --git a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go deleted file mode 100644 index 6fb74de4..00000000 --- a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go +++ /dev/null @@ -1,172 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" -) - -// Decode a reference to a struct pointer. 
-func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - panic("not supported, since this is a pointer receiver") - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - bas := structPointer_FieldPointer(base, p.field) - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of references to struct pointers ([]struct). -func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error { - newBas := appendStructPointer(base, p.field, p.sstype) - - if is_group { - panic("not supported, maybe in future, if requested.") - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - panic("not supported, since this is not a pointer receiver.") - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, newBas) - - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of references to struct pointers. -func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_ref_struct(p, false, base) -} - -func setPtrCustomType(base structPointer, f field, v interface{}) { - if v == nil { - return - } - structPointer_SetStructPointer(base, f, toStructPointer(reflect.ValueOf(v))) -} - -func setCustomType(base structPointer, f field, value interface{}) { - if value == nil { - return - } - v := reflect.ValueOf(value).Elem() - t := reflect.TypeOf(value).Elem() - kind := t.Kind() - switch kind { - case reflect.Slice: - slice := reflect.MakeSlice(t, v.Len(), v.Cap()) - reflect.Copy(slice, v) - oldHeader := structPointer_GetSliceHeader(base, f) - oldHeader.Data = slice.Pointer() - oldHeader.Len = v.Len() - oldHeader.Cap = v.Cap() - default: - size := reflect.TypeOf(value).Elem().Size() - structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), int(size)) - } -} - -func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype.Elem()).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - setPtrCustomType(base, p.field, custom) - return nil -} - -func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - if custom != nil { - setCustomType(base, p.field, custom) - } - return nil -} - -// Decode a slice of bytes ([]byte) into a slice of custom types. 
-func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype.Elem()).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - newBas := appendStructPointer(base, p.field, p.ctype) - - var zero field - setCustomType(newBas, zero, custom) - - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/decode_test.go b/vendor/github.com/gogo/protobuf/proto/decode_test.go deleted file mode 100644 index 64d4decd..00000000 --- a/vendor/github.com/gogo/protobuf/proto/decode_test.go +++ /dev/null @@ -1,262 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build go1.7 - -package proto_test - -import ( - "testing" - - "github.com/gogo/protobuf/proto" - tpb "github.com/gogo/protobuf/proto/proto3_proto" -) - -var ( - bytesBlackhole []byte - msgBlackhole = new(tpb.Message) -) - -// Disabled this Benchmark because it is using features (b.Run) from go1.7 and gogoprotobuf still have compatibility with go1.5 -// BenchmarkVarint32ArraySmall shows the performance on an array of small int32 fields (1 and -// 2 bytes long). 
-// func BenchmarkVarint32ArraySmall(b *testing.B) { -// for i := uint(1); i <= 10; i++ { -// dist := genInt32Dist([7]int{0, 3, 1}, 1<" } func init() { RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") } - -func (o *Buffer) decDuration() (time.Duration, error) { - b, err := o.DecodeRawBytes(true) - if err != nil { - return 0, err - } - dproto := &duration{} - if err := Unmarshal(b, dproto); err != nil { - return 0, err - } - return durationFromProto(dproto) -} - -func (o *Buffer) dec_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, uint64(d)) - return nil -} - -func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d)) - return nil -} - -func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))) - var zero field - setPtrCustomType(newBas, zero, &d) - return nil -} - -func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - structPointer_Word64Slice(base, p.field).Append(uint64(d)) - return nil -} - -func size_duration(p *Properties, base structPointer) (n int) { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - dur := structPointer_Interface(structp, durationType).(*time.Duration) - d := durationProto(*dur) - size := Size(d) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_duration(p *Properties, base structPointer) error { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - dur := structPointer_Interface(structp, durationType).(*time.Duration) - d := durationProto(*dur) - data, err := Marshal(d) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_ref_duration(p *Properties, base structPointer) (n int) { - dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) - d := durationProto(*dur) - size := Size(d) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error { - dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) - d := durationProto(*dur) - data, err := Marshal(d) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(data) - return nil -} - -func size_slice_duration(p *Properties, base structPointer) (n int) { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - if durs[i] == nil { - return 0 - } - dproto := durationProto(*durs[i]) - size := Size(dproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - if durs[i] == nil { - return errRepeatedHasNil - } - dproto := durationProto(*durs[i]) - data, err := Marshal(dproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} - -func size_slice_ref_duration(p *Properties, base structPointer) (n int) { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - dproto := durationProto(durs[i]) - size := Size(dproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - dproto := durationProto(durs[i]) - data, err := Marshal(dproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go index 8b84d1b2..c27d35f8 100644 --- a/vendor/github.com/gogo/protobuf/proto/encode.go +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -39,7 +39,6 @@ import ( "errors" "fmt" "reflect" - "sort" ) // RequiredNotSetError is the error returned if Marshal is called with @@ -82,10 +81,6 @@ var ( const maxVarintBytes = 10 // maximum length of a varint -// maxMarshalSize is the largest allowed size of an encoded protobuf, -// since C++ and Java use signed int32s for the size. -const maxMarshalSize = 1<<31 - 1 - // EncodeVarint returns the varint encoding of x. // This is the format for the // int32, int64, uint32, uint64, bool, and enum @@ -119,18 +114,27 @@ func (p *Buffer) EncodeVarint(x uint64) error { // SizeVarint returns the varint encoding size of an integer. func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 } // EncodeFixed64 writes a 64-bit integer to the Buffer. @@ -149,10 +153,6 @@ func (p *Buffer) EncodeFixed64(x uint64) error { return nil } -func sizeFixed64(x uint64) int { - return 8 -} - // EncodeFixed32 writes a 32-bit integer to the Buffer. // This is the format for the // fixed32, sfixed32, and float protocol buffer types. 
@@ -165,20 +165,12 @@ func (p *Buffer) EncodeFixed32(x uint64) error { return nil } -func sizeFixed32(x uint64) int { - return 4 -} - // EncodeZigzag64 writes a zigzag-encoded 64-bit integer // to the Buffer. // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. - return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer @@ -189,10 +181,6 @@ func (p *Buffer) EncodeZigzag32(x uint64) error { return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) } -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - // EncodeRawBytes writes a count-delimited byte buffer to the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -202,11 +190,6 @@ func (p *Buffer) EncodeRawBytes(b []byte) error { return nil } -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - // EncodeStringBytes writes an encoded string to the Buffer. // This is the format used for the proto2 string type. func (p *Buffer) EncodeStringBytes(s string) error { @@ -215,319 +198,17 @@ func (p *Buffer) EncodeStringBytes(s string) error { return nil } -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - // Marshaler is the interface representing objects that can marshal themselves. type Marshaler interface { Marshal() ([]byte, error) } -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - // EncodeMessage writes the protocol buffer to the Buffer, // prefixed by a varint-encoded length. func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - p.buf = append(p.buf, data...) - return err - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Encode++ // Parens are to work around a goimports bug. - } - - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. 
- if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Size++ // Parens are to work around a goimports bug. - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) } // All protocol buffer fields are nillable, but be careful. @@ -538,825 +219,3 @@ func isNil(v reflect.Value) bool { } return false } - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? 
- if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. -func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). 
-func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? 
- if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. -func (o *Buffer) enc_map(p *Properties, base structPointer) error { - exts := structPointer_ExtMap(base, p.field) - if err := encodeExtensionsMap(*exts); err != nil { - return err - } - - return o.enc_map_body(*exts) -} - -func (o *Buffer) enc_exts(p *Properties, base structPointer) error { - exts := structPointer_Extensions(base, p.field) - - v, mu := exts.extensionsRead() - if v == nil { - return nil - } - - mu.Lock() - defer mu.Unlock() - if err := encodeExtensionsMap(v); err != nil { - return err - } - - return o.enc_map_body(v) -} - -func (o *Buffer) enc_map_body(v map[int32]Extension) error { - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := structPointer_ExtMap(base, p.field) - return extensionsMapSize(*v) -} - -func size_exts(p *Properties, base structPointer) int { - v := structPointer_Extensions(base, p.field) - return extensionsSize(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? - - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. 
- for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. - - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. - // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. 
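For orientation, the enc_new_map/mapEncodeScratch code being removed above encodes a map field exactly as the comment in it describes: as a repeated entry message whose key is field 1 and whose value is field 2. A minimal byte-level sketch of that layout, assuming a hypothetical map<int32, string> field numbered 4 (the field number, key and value here are made up for illustration) and using only proto.EncodeVarint from this package:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	// One map entry, encoded like a message { key = 1; value = 2 }:
	// field 1, varint wire type, key 7; field 2, length-delimited, value "x".
	entry := append([]byte{0x08}, proto.EncodeVarint(7)...)
	entry = append(entry, 0x12, 0x01, 'x')

	// The outer map field: tag for field 4 with wire type 2 (0x22),
	// then the entry's length as a varint, then the entry bytes.
	field := append([]byte{0x22}, proto.EncodeVarint(uint64(len(entry)))...)
	field = append(field, entry...)

	fmt.Printf("%x\n", field) // 2205080712017 8 -> bytes 22 05 08 07 12 01 78
}

Each additional key/value pair in the map would simply repeat this tag/length/entry triple, which is why map fields and repeated entry messages are wire-compatible.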
- if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go index 32111b7f..0f5fb173 100644 --- a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -3,11 +3,6 @@ // Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// http://github.com/golang/protobuf/ -// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -18,9 +13,6 @@ // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -36,315 +28,6 @@ package proto -import ( - "reflect" -) - func NewRequiredNotSetError(field string) *RequiredNotSetError { return &RequiredNotSetError{field} } - -type Sizer interface { - Size() int -} - -func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, s...) - return nil -} - -func size_ext_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return 0 - } - n += len(s) - return -} - -// Encode a reference to bool pointer. -func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - x := 0 - if v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_bool(p *Properties, base structPointer) int { - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode a reference to int32 pointer. -func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func (o *Buffer) enc_ref_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a reference to an int64 pointer. -func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_ref_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a reference to a string pointer. -func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(v) - return nil -} - -func size_ref_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - n += len(p.tagcode) - n += sizeStringBytes(v) - return -} - -// Encode a reference to a message struct. 
-func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetRefStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -//TODO this is only copied, please fix this -func size_ref_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetRefStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a slice of references to message struct pointers ([]struct). -func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error { - var state errorState - ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) - l := ss.Len() - for i := 0; i < l; i++ { - structp := ss.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - } - return state.err -} - -//TODO this is only copied, please fix this -func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) { - ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) - l := ss.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := ss.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += len(p.tagcode) - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error { - i := structPointer_InterfaceRef(base, p.field, p.ctype) - if i == nil { - return ErrNil - } - custom := i.(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - if data == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(data) - return nil -} - -func size_custom_bytes(p *Properties, base structPointer) (n int) { - n += len(p.tagcode) - i := structPointer_InterfaceRef(base, p.field, p.ctype) - if i == nil { - return 0 - } - custom := i.(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - return -} - -func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error { - custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - if data == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_custom_ref_bytes(p *Properties, base structPointer) (n int) { - n += len(p.tagcode) - i := structPointer_InterfaceAt(base, p.field, p.ctype) - if i == nil { - return 0 - } - custom := i.(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - return -} - -func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error { - inter := structPointer_InterfaceRef(base, p.field, p.ctype) - if inter == nil { - return ErrNil - } - slice := reflect.ValueOf(inter) - l := slice.Len() - for i := 0; i < l; i++ { - v := slice.Index(i) - custom := v.Interface().(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} - -func size_custom_slice_bytes(p *Properties, base structPointer) (n int) { - inter := structPointer_InterfaceRef(base, p.field, p.ctype) - if inter == nil { - return 0 - } - slice := reflect.ValueOf(inter) - l := slice.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - v := slice.Index(i) - custom := v.Interface().(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - } - return -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_test.go b/vendor/github.com/gogo/protobuf/proto/encode_test.go deleted file mode 100644 index 2176b894..00000000 --- a/vendor/github.com/gogo/protobuf/proto/encode_test.go +++ /dev/null @@ -1,84 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build go1.7 - -package proto_test - -import ( - "testing" - - "github.com/gogo/protobuf/proto" - tpb "github.com/gogo/protobuf/proto/proto3_proto" -) - -var ( - blackhole []byte -) - -// Disabled this Benchmark because it is using features (b.Run) from go1.7 and gogoprotobuf still have compatibility with go1.5 -// BenchmarkAny creates increasingly large arbitrary Any messages. The type is always the -// same. -// func BenchmarkAny(b *testing.B) { -// data := make([]byte, 1<<20) -// quantum := 1 << 10 -// for i := uint(0); i <= 10; i++ { -// b.Run(strconv.Itoa(quantum<> 3) - wireType := int(tag & 0x7) - n2, err := size((*ext)[offset+n1:], wireType) - if err != nil { - panic(err) - } - newOffset := offset + n1 + n2 - if fieldNum == theFieldNum { - *ext = append((*ext)[:offset], (*ext)[newOffset:]...) - return offset - } - offset = newOffset - } - return -1 -} - // ClearExtension removes the given extension from pb. func ClearExtension(pb Message, extension *ExtensionDesc) { clearExtension(pb, extension.Field) } func clearExtension(pb Message, fieldNum int32) { - if epb, doki := pb.(extensionsBytes); doki { + if epb, ok := pb.(extensionsBytes); ok { offset := 0 for offset != -1 { offset = deleteExtension(epb, fieldNum, offset) } return } - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } // TODO: Check types, field numbers, etc.? @@ -422,39 +333,33 @@ func clearExtension(pb Message, fieldNum int32) { delete(extmap, fieldNum) } -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. 
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { if epb, doki := pb.(extensionsBytes); doki { ext := epb.GetExtensions() - o := 0 - for o < len(*ext) { - tag, n := DecodeVarint((*ext)[o:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - l, err := size((*ext)[o+n:], wireType) - if err != nil { - return nil, err - } - if int32(fieldNum) == extension.Field { - v, err := decodeExtension((*ext)[o:o+n+l], extension) - if err != nil { - return nil, err - } - return v, nil - } - o += n + l - } - return defaultExtensionValue(extension) + return decodeExtensionFromBytes(extension, *ext) } - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") - } - if err := checkExtensionTypes(epb, extension); err != nil { + + epb, err := extendable(pb) + if err != nil { return nil, err } + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if cerr := checkExtensionTypes(epb, extension); cerr != nil { + return nil, cerr + } + } + emap, mu := epb.extensionsRead() if emap == nil { return defaultExtensionValue(extension) @@ -479,6 +384,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { return e.value, nil } + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + v, err := decodeExtension(e.enc, extension) if err != nil { return nil, err @@ -496,6 +406,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // defaultExtensionValue returns the default value for extension. // If no default for an extension is defined ErrMissingExtension is returned. func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + t := reflect.TypeOf(extension.ExtensionType) props := extensionProperties(extension) @@ -530,31 +445,28 @@ func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { // decodeExtension decodes an extension encoded in b. func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) + unmarshal := typeUnmarshaler(t, extension.Tag) // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. + // Allocate space to store the pointer/slice. value := reflect.New(t).Elem() + var err error for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { - return nil, err + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF } + b = b[n:] + wire := int(x) & 7 - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { return nil, err } - if o.index >= len(o.buf) { + if len(b) == 0 { break } } @@ -564,9 +476,13 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { // GetExtensions returns a slice of the extensions present in pb that are also listed in es. // The returned slice has the same length as es; missing extensions will appear as nil elements. 
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } extensions = make([]interface{}, len(es)) for i, e := range es { - extensions[i], err = GetExtension(pb, e) + extensions[i], err = GetExtension(epb, e) if err == ErrMissingExtension { err = nil } @@ -581,9 +497,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e // For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing // just the Field field, which defines the extension's field number. func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + epb, err := extendable(pb) + if err != nil { + return nil, err } registeredExtensions := RegisteredExtensions(pb) @@ -610,23 +526,18 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { // SetExtension sets the specified extension of pb to the specified value. func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - if epb, doki := pb.(extensionsBytes); doki { - ClearExtension(pb, extension) - ext := epb.GetExtensions() - et := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - p := NewBuffer(nil) - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { + if epb, ok := pb.(extensionsBytes); ok { + newb, err := encodeExtension(extension, value) + if err != nil { return err } - *ext = append(*ext, p.buf...) + bb := epb.GetExtensions() + *bb = append(*bb, newb...) return nil } - epb, ok := extendable(pb) - if !ok { - return errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return err } if err := checkExtensionTypes(epb, extension); err != nil { return err @@ -656,8 +567,8 @@ func ClearAllExtensions(pb Message) { *ext = []byte{} return } - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } m := epb.extensionsWrite() diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go index ea6478f0..53ebd8cc 100644 --- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -32,12 +32,36 @@ import ( "bytes" "errors" "fmt" + "io" "reflect" "sort" "strings" "sync" ) +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +type slowExtensionAdapter struct { + extensionsBytes +} + +func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { + panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") +} + +func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + b := s.GetExtensions() + m, err := BytesToExtensionsMap(*b) + if err != nil { + panic(err) + } + return m, notLocker{} +} + func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { if reflect.ValueOf(pb).IsNil() { return ifnotset @@ -56,19 +80,28 @@ func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool } func (this *Extension) Equal(that *Extension) bool { + if err := this.Encode(); err != nil { + return false + } + if err := that.Encode(); err != nil { + 
return false + } return bytes.Equal(this.enc, that.enc) } func (this *Extension) Compare(that *Extension) int { + if err := this.Encode(); err != nil { + return 1 + } + if err := that.Encode(); err != nil { + return -1 + } return bytes.Compare(this.enc, that.enc) } func SizeOfInternalExtension(m extendableProto) (n int) { - return SizeOfExtensionMap(m.extensionsWrite()) -} - -func SizeOfExtensionMap(m map[int32]Extension) (n int) { - return extensionsMapSize(m) + info := getMarshalInfo(reflect.TypeOf(m)) + return info.sizeV1Extensions(m.extensionsWrite()) } type sortableMapElem struct { @@ -122,28 +155,26 @@ func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) } func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { - if err := encodeExtensionsMap(m); err != nil { - return 0, err - } - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - for _, k := range keys { - n += copy(data[n:], m[int32(k)].enc) + o := 0 + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[o:], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + o += n } - return n, nil + return o, nil } func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { - if m[id].value == nil || m[id].desc == nil { - return m[id].enc, nil - } - if err := encodeExtensionsMap(m); err != nil { + e := m[id] + if err := e.Encode(); err != nil { return nil, err } - return m[id].enc, nil + return e.enc, nil } func size(buf []byte, wire int) (int, error) { @@ -218,35 +249,58 @@ func AppendExtension(e Message, tag int32, buf []byte) { } } -func encodeExtension(e *Extension) error { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - return nil +func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { + u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) + ei := u.getExtElemInfo(extension) + v := value + p := toAddrPointer(&v, ei.isptr) + siz := ei.sizer(p, SizeVarint(ei.wiretag)) + buf := make([]byte, 0, siz) + return ei.marshaler(buf, p, ei.wiretag, false) +} + +func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { + o := 0 + for o < len(buf) { + tag, n := DecodeVarint((buf)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + if o+n > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + l, err := size((buf)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + if o+n+l > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + v, err := decodeExtension((buf)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l } - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. 
- x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err + return defaultExtensionValue(extension) +} + +func (this *Extension) Encode() error { + if this.enc == nil { + var err error + this.enc, err = encodeExtension(this.desc, this.value) + if err != nil { + return err + } } - e.enc = p.buf return nil } func (this Extension) GoString() string { - if this.enc == nil { - if err := encodeExtension(&this); err != nil { - panic(err) - } + if err := this.Encode(); err != nil { + return fmt.Sprintf("error encoding extension: %v", err) } return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) } @@ -292,3 +346,23 @@ func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { pb := extendable.(extendableProto) return pb.extensionsWrite() } + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_test.go b/vendor/github.com/gogo/protobuf/proto/extensions_test.go deleted file mode 100644 index 15c76a36..00000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions_test.go +++ /dev/null @@ -1,538 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
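The extension accessors touched above (SetExtension, GetExtension, the new lazy Extension.Encode) keep the same call pattern from the caller's point of view. A minimal round-trip sketch, assuming the testdata message and the type-complete pb.E_Ext_Text descriptor that the deleted tests below also use:

package main

import (
	"fmt"
	"log"

	"github.com/gogo/protobuf/proto"
	pb "github.com/gogo/protobuf/proto/testdata"
)

func main() {
	msg := &pb.MyMessage{Count: proto.Int32(1)}

	// Set a proto2 string extension on the message.
	if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil {
		log.Fatal(err)
	}

	// With a type-complete descriptor, GetExtension returns the decoded
	// Go value (*string here); an incomplete descriptor would instead
	// return the raw encoded bytes, per the updated doc comment above.
	v, err := proto.GetExtension(msg, pb.E_Ext_Text)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*v.(*string)) // hello
}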
- -package proto_test - -import ( - "bytes" - "fmt" - "reflect" - "sort" - "testing" - - "github.com/gogo/protobuf/proto" - pb "github.com/gogo/protobuf/proto/testdata" -) - -func TestGetExtensionsWithMissingExtensions(t *testing.T) { - msg := &pb.MyMessage{} - ext1 := &pb.Ext{} - if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { - t.Fatalf("Could not set ext1: %s", err) - } - exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ - pb.E_Ext_More, - pb.E_Ext_Text, - }) - if err != nil { - t.Fatalf("GetExtensions() failed: %s", err) - } - if exts[0] != ext1 { - t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) - } - if exts[1] != nil { - t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) - } -} - -func TestExtensionDescsWithMissingExtensions(t *testing.T) { - msg := &pb.MyMessage{Count: proto.Int32(0)} - extdesc1 := pb.E_Ext_More - if descs, err := proto.ExtensionDescs(msg); len(descs) != 0 || err != nil { - t.Errorf("proto.ExtensionDescs: got %d descs, error %v; want 0, nil", len(descs), err) - } - - ext1 := &pb.Ext{} - if err := proto.SetExtension(msg, extdesc1, ext1); err != nil { - t.Fatalf("Could not set ext1: %s", err) - } - extdesc2 := &proto.ExtensionDesc{ - ExtendedType: (*pb.MyMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 123456789, - Name: "a.b", - Tag: "varint,123456789,opt", - } - ext2 := proto.Bool(false) - if err := proto.SetExtension(msg, extdesc2, ext2); err != nil { - t.Fatalf("Could not set ext2: %s", err) - } - - b, err := proto.Marshal(msg) - if err != nil { - t.Fatalf("Could not marshal msg: %v", err) - } - if err = proto.Unmarshal(b, msg); err != nil { - t.Fatalf("Could not unmarshal into msg: %v", err) - } - - descs, err := proto.ExtensionDescs(msg) - if err != nil { - t.Fatalf("proto.ExtensionDescs: got error %v", err) - } - sortExtDescs(descs) - wantDescs := []*proto.ExtensionDesc{extdesc1, {Field: extdesc2.Field}} - if !reflect.DeepEqual(descs, wantDescs) { - t.Errorf("proto.ExtensionDescs(msg) sorted extension ids: got %+v, want %+v", descs, wantDescs) - } -} - -type ExtensionDescSlice []*proto.ExtensionDesc - -func (s ExtensionDescSlice) Len() int { return len(s) } -func (s ExtensionDescSlice) Less(i, j int) bool { return s[i].Field < s[j].Field } -func (s ExtensionDescSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -func sortExtDescs(s []*proto.ExtensionDesc) { - sort.Sort(ExtensionDescSlice(s)) -} - -func TestGetExtensionStability(t *testing.T) { - check := func(m *pb.MyMessage) bool { - ext1, err := proto.GetExtension(m, pb.E_Ext_More) - if err != nil { - t.Fatalf("GetExtension() failed: %s", err) - } - ext2, err := proto.GetExtension(m, pb.E_Ext_More) - if err != nil { - t.Fatalf("GetExtension() failed: %s", err) - } - return ext1 == ext2 - } - msg := &pb.MyMessage{Count: proto.Int32(4)} - ext0 := &pb.Ext{} - if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil { - t.Fatalf("Could not set ext1: %s", ext0) - } - if !check(msg) { - t.Errorf("GetExtension() not stable before marshaling") - } - bb, err := proto.Marshal(msg) - if err != nil { - t.Fatalf("Marshal() failed: %s", err) - } - msg1 := &pb.MyMessage{} - err = proto.Unmarshal(bb, msg1) - if err != nil { - t.Fatalf("Unmarshal() failed: %s", err) - } - if !check(msg1) { - t.Errorf("GetExtension() not stable after unmarshaling") - } -} - -func TestGetExtensionDefaults(t *testing.T) { - var setFloat64 float64 = 1 - var setFloat32 float32 = 2 - var setInt32 int32 = 3 - var setInt64 int64 = 4 - var setUint32 uint32 = 5 - var 
setUint64 uint64 = 6 - var setBool = true - var setBool2 = false - var setString = "Goodnight string" - var setBytes = []byte("Goodnight bytes") - var setEnum = pb.DefaultsMessage_TWO - - type testcase struct { - ext *proto.ExtensionDesc // Extension we are testing. - want interface{} // Expected value of extension, or nil (meaning that GetExtension will fail). - def interface{} // Expected value of extension after ClearExtension(). - } - tests := []testcase{ - {pb.E_NoDefaultDouble, setFloat64, nil}, - {pb.E_NoDefaultFloat, setFloat32, nil}, - {pb.E_NoDefaultInt32, setInt32, nil}, - {pb.E_NoDefaultInt64, setInt64, nil}, - {pb.E_NoDefaultUint32, setUint32, nil}, - {pb.E_NoDefaultUint64, setUint64, nil}, - {pb.E_NoDefaultSint32, setInt32, nil}, - {pb.E_NoDefaultSint64, setInt64, nil}, - {pb.E_NoDefaultFixed32, setUint32, nil}, - {pb.E_NoDefaultFixed64, setUint64, nil}, - {pb.E_NoDefaultSfixed32, setInt32, nil}, - {pb.E_NoDefaultSfixed64, setInt64, nil}, - {pb.E_NoDefaultBool, setBool, nil}, - {pb.E_NoDefaultBool, setBool2, nil}, - {pb.E_NoDefaultString, setString, nil}, - {pb.E_NoDefaultBytes, setBytes, nil}, - {pb.E_NoDefaultEnum, setEnum, nil}, - {pb.E_DefaultDouble, setFloat64, float64(3.1415)}, - {pb.E_DefaultFloat, setFloat32, float32(3.14)}, - {pb.E_DefaultInt32, setInt32, int32(42)}, - {pb.E_DefaultInt64, setInt64, int64(43)}, - {pb.E_DefaultUint32, setUint32, uint32(44)}, - {pb.E_DefaultUint64, setUint64, uint64(45)}, - {pb.E_DefaultSint32, setInt32, int32(46)}, - {pb.E_DefaultSint64, setInt64, int64(47)}, - {pb.E_DefaultFixed32, setUint32, uint32(48)}, - {pb.E_DefaultFixed64, setUint64, uint64(49)}, - {pb.E_DefaultSfixed32, setInt32, int32(50)}, - {pb.E_DefaultSfixed64, setInt64, int64(51)}, - {pb.E_DefaultBool, setBool, true}, - {pb.E_DefaultBool, setBool2, true}, - {pb.E_DefaultString, setString, "Hello, string"}, - {pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")}, - {pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE}, - } - - checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error { - val, err := proto.GetExtension(msg, test.ext) - if err != nil { - if valWant != nil { - return fmt.Errorf("GetExtension(): %s", err) - } - if want := proto.ErrMissingExtension; err != want { - return fmt.Errorf("Unexpected error: got %v, want %v", err, want) - } - return nil - } - - // All proto2 extension values are either a pointer to a value or a slice of values. - ty := reflect.TypeOf(val) - tyWant := reflect.TypeOf(test.ext.ExtensionType) - if got, want := ty, tyWant; got != want { - return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want) - } - tye := ty.Elem() - tyeWant := tyWant.Elem() - if got, want := tye, tyeWant; got != want { - return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want) - } - - // Check the name of the type of the value. - // If it is an enum it will be type int32 with the name of the enum. - if got, want := tye.Name(), tye.Name(); got != want { - return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want) - } - - // Check that value is what we expect. - // If we have a pointer in val, get the value it points to. 
- valExp := val - if ty.Kind() == reflect.Ptr { - valExp = reflect.ValueOf(val).Elem().Interface() - } - if got, want := valExp, valWant; !reflect.DeepEqual(got, want) { - return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want) - } - - return nil - } - - setTo := func(test testcase) interface{} { - setTo := reflect.ValueOf(test.want) - if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr { - setTo = reflect.New(typ).Elem() - setTo.Set(reflect.New(setTo.Type().Elem())) - setTo.Elem().Set(reflect.ValueOf(test.want)) - } - return setTo.Interface() - } - - for _, test := range tests { - msg := &pb.DefaultsMessage{} - name := test.ext.Name - - // Check the initial value. - if err := checkVal(test, msg, test.def); err != nil { - t.Errorf("%s: %v", name, err) - } - - // Set the per-type value and check value. - name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want) - if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil { - t.Errorf("%s: SetExtension(): %v", name, err) - continue - } - if err := checkVal(test, msg, test.want); err != nil { - t.Errorf("%s: %v", name, err) - continue - } - - // Set and check the value. - name += " (cleared)" - proto.ClearExtension(msg, test.ext) - if err := checkVal(test, msg, test.def); err != nil { - t.Errorf("%s: %v", name, err) - } - } -} - -func TestExtensionsRoundTrip(t *testing.T) { - msg := &pb.MyMessage{} - ext1 := &pb.Ext{ - Data: proto.String("hi"), - } - ext2 := &pb.Ext{ - Data: proto.String("there"), - } - exists := proto.HasExtension(msg, pb.E_Ext_More) - if exists { - t.Error("Extension More present unexpectedly") - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { - t.Error(err) - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil { - t.Error(err) - } - e, err := proto.GetExtension(msg, pb.E_Ext_More) - if err != nil { - t.Error(err) - } - x, ok := e.(*pb.Ext) - if !ok { - t.Errorf("e has type %T, expected testdata.Ext", e) - } else if *x.Data != "there" { - t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x) - } - proto.ClearExtension(msg, pb.E_Ext_More) - if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension { - t.Errorf("got %v, expected ErrMissingExtension", e) - } - if _, err := proto.GetExtension(msg, pb.E_X215); err == nil { - t.Error("expected bad extension error, got nil") - } - if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil { - t.Error("expected extension err") - } - if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil { - t.Error("expected some sort of type mismatch error, got nil") - } -} - -func TestNilExtension(t *testing.T) { - msg := &pb.MyMessage{ - Count: proto.Int32(1), - } - if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil { - t.Fatal(err) - } - if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil { - t.Error("expected SetExtension to fail due to a nil extension") - } else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want { - t.Errorf("expected error %v, got %v", want, err) - } - // Note: if the behavior of Marshal is ever changed to ignore nil extensions, update - // this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal. -} - -func TestMarshalUnmarshalRepeatedExtension(t *testing.T) { - // Add a repeated extension to the result. 
- tests := []struct { - name string - ext []*pb.ComplexExtension - }{ - { - "two fields", - []*pb.ComplexExtension{ - {First: proto.Int32(7)}, - {Second: proto.Int32(11)}, - }, - }, - { - "repeated field", - []*pb.ComplexExtension{ - {Third: []int32{1000}}, - {Third: []int32{2000}}, - }, - }, - { - "two fields and repeated field", - []*pb.ComplexExtension{ - {Third: []int32{1000}}, - {First: proto.Int32(9)}, - {Second: proto.Int32(21)}, - {Third: []int32{2000}}, - }, - }, - } - for _, test := range tests { - // Marshal message with a repeated extension. - msg1 := new(pb.OtherMessage) - err := proto.SetExtension(msg1, pb.E_RComplex, test.ext) - if err != nil { - t.Fatalf("[%s] Error setting extension: %v", test.name, err) - } - b, err := proto.Marshal(msg1) - if err != nil { - t.Fatalf("[%s] Error marshaling message: %v", test.name, err) - } - - // Unmarshal and read the merged proto. - msg2 := new(pb.OtherMessage) - err = proto.Unmarshal(b, msg2) - if err != nil { - t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err) - } - e, err := proto.GetExtension(msg2, pb.E_RComplex) - if err != nil { - t.Fatalf("[%s] Error getting extension: %v", test.name, err) - } - ext := e.([]*pb.ComplexExtension) - if ext == nil { - t.Fatalf("[%s] Invalid extension", test.name) - } - if !reflect.DeepEqual(ext, test.ext) { - t.Errorf("[%s] Wrong value for ComplexExtension: got: %v want: %v\n", test.name, ext, test.ext) - } - } -} - -func TestUnmarshalRepeatingNonRepeatedExtension(t *testing.T) { - // We may see multiple instances of the same extension in the wire - // format. For example, the proto compiler may encode custom options in - // this way. Here, we verify that we merge the extensions together. - tests := []struct { - name string - ext []*pb.ComplexExtension - }{ - { - "two fields", - []*pb.ComplexExtension{ - {First: proto.Int32(7)}, - {Second: proto.Int32(11)}, - }, - }, - { - "repeated field", - []*pb.ComplexExtension{ - {Third: []int32{1000}}, - {Third: []int32{2000}}, - }, - }, - { - "two fields and repeated field", - []*pb.ComplexExtension{ - {Third: []int32{1000}}, - {First: proto.Int32(9)}, - {Second: proto.Int32(21)}, - {Third: []int32{2000}}, - }, - }, - } - for _, test := range tests { - var buf bytes.Buffer - var want pb.ComplexExtension - - // Generate a serialized representation of a repeated extension - // by catenating bytes together. - for i, e := range test.ext { - // Merge to create the wanted proto. - proto.Merge(&want, e) - - // serialize the message - msg := new(pb.OtherMessage) - err := proto.SetExtension(msg, pb.E_Complex, e) - if err != nil { - t.Fatalf("[%s] Error setting extension %d: %v", test.name, i, err) - } - b, err := proto.Marshal(msg) - if err != nil { - t.Fatalf("[%s] Error marshaling message %d: %v", test.name, i, err) - } - buf.Write(b) - } - - // Unmarshal and read the merged proto. 
- msg2 := new(pb.OtherMessage) - err := proto.Unmarshal(buf.Bytes(), msg2) - if err != nil { - t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err) - } - e, err := proto.GetExtension(msg2, pb.E_Complex) - if err != nil { - t.Fatalf("[%s] Error getting extension: %v", test.name, err) - } - ext := e.(*pb.ComplexExtension) - if ext == nil { - t.Fatalf("[%s] Invalid extension", test.name) - } - if !reflect.DeepEqual(*ext, want) { - t.Errorf("[%s] Wrong value for ComplexExtension: got: %v want: %v\n", test.name, ext, want) - } - } -} - -func TestClearAllExtensions(t *testing.T) { - // unregistered extension - desc := &proto.ExtensionDesc{ - ExtendedType: (*pb.MyMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 101010100, - Name: "emptyextension", - Tag: "varint,0,opt", - } - m := &pb.MyMessage{} - if proto.HasExtension(m, desc) { - t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m)) - } - if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil { - t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err) - } - if !proto.HasExtension(m, desc) { - t.Errorf("proto.HasExtension(%s): got false, want true", proto.MarshalTextString(m)) - } - proto.ClearAllExtensions(m) - if proto.HasExtension(m, desc) { - t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m)) - } -} - -func TestMarshalRace(t *testing.T) { - // unregistered extension - desc := &proto.ExtensionDesc{ - ExtendedType: (*pb.MyMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 101010100, - Name: "emptyextension", - Tag: "varint,0,opt", - } - - m := &pb.MyMessage{Count: proto.Int32(4)} - if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil { - t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err) - } - - errChan := make(chan error, 3) - for n := 3; n > 0; n-- { - go func() { - _, err := proto.Marshal(m) - errChan <- err - }() - } - for i := 0; i < 3; i++ { - err := <-errChan - if err != nil { - t.Fatal(err) - } - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go index c98d73da..0f1950c6 100644 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -265,6 +265,7 @@ package proto import ( "encoding/json" + "errors" "fmt" "log" "reflect" @@ -273,6 +274,8 @@ import ( "sync" ) +var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string") + // Message is implemented by generated protocol buffer messages. type Message interface { Reset() @@ -309,16 +312,7 @@ type Buffer struct { buf []byte // encode/decode byte stream index int // read point - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 + deterministic bool } // NewBuffer allocates a new Buffer and initializes its internal data to @@ -343,6 +337,30 @@ func (p *Buffer) SetBuf(s []byte) { // Bytes returns the contents of the Buffer. func (p *Buffer) Bytes() []byte { return p.buf } +// SetDeterministic sets whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. 
+// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + /* * Helper routines for simplifying the creation of optional fields of basic type. */ @@ -831,22 +849,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes return sf, false, nil } +// mapKeys returns a sort.Interface to be used for sorting the map keys. // Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } + s := mapKeySorter{vs: vs} - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. if len(vs) == 0 { return s } @@ -855,6 +863,12 @@ func mapKeys(vs []reflect.Value) sort.Interface { s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } case reflect.Uint32, reflect.Uint64: s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) } return s @@ -895,3 +909,13 @@ const GoGoProtoPackageIsVersion2 = true // ProtoPackageIsVersion1 is referenced from generated protocol buffer files // to assert that that code is compatible with this version of the proto package. const GoGoProtoPackageIsVersion1 = true + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. 
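// SetDeterministic, added above, is opt-in per Buffer. A minimal usage sketch,
// assuming a generated message type with a map field; the import path, message
// name, and field name below are placeholders, not part of this change:
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/gogo/protobuf/proto"
	pb "example.com/gen/testpb" // hypothetical generated package
)

func main() {
	m := &pb.Msg{Labels: map[string]string{"b": "2", "a": "1"}} // hypothetical message
	var b1, b2 proto.Buffer
	b1.SetDeterministic(true)
	b2.SetDeterministic(true)
	if err := b1.Marshal(m); err != nil {
		log.Fatal(err)
	}
	if err := b2.Marshal(m); err != nil {
		log.Fatal(err)
	}
	// Map entries are emitted in sorted key order, so repeated marshals of the
	// same message produce identical bytes.
	fmt.Println(bytes.Equal(b1.Bytes(), b2.Bytes())) // true
}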
+type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go index 4b4f7c90..b3aa3919 100644 --- a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go @@ -33,6 +33,14 @@ import ( "strconv" ) +type Sizer interface { + Size() int +} + +type ProtoSizer interface { + ProtoSize() int +} + func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { s, ok := m[value] if !ok { diff --git a/vendor/github.com/gogo/protobuf/proto/map_test.go b/vendor/github.com/gogo/protobuf/proto/map_test.go deleted file mode 100644 index 18b946d0..00000000 --- a/vendor/github.com/gogo/protobuf/proto/map_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package proto_test - -import ( - "fmt" - "testing" - - "github.com/gogo/protobuf/proto" - ppb "github.com/gogo/protobuf/proto/proto3_proto" -) - -func marshalled() []byte { - m := &ppb.IntMaps{} - for i := 0; i < 1000; i++ { - m.Maps = append(m.Maps, &ppb.IntMap{ - Rtt: map[int32]int32{1: 2}, - }) - } - b, err := proto.Marshal(m) - if err != nil { - panic(fmt.Sprintf("Can't marshal %+v: %v", m, err)) - } - return b -} - -func BenchmarkConcurrentMapUnmarshal(b *testing.B) { - in := marshalled() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - var out ppb.IntMaps - if err := proto.Unmarshal(in, &out); err != nil { - b.Errorf("Can't unmarshal ppb.IntMaps: %v", err) - } - } - }) -} - -func BenchmarkSequentialMapUnmarshal(b *testing.B) { - in := marshalled() - b.ResetTimer() - for i := 0; i < b.N; i++ { - var out ppb.IntMaps - if err := proto.Unmarshal(in, &out); err != nil { - b.Errorf("Can't unmarshal ppb.IntMaps: %v", err) - } - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go index fd982dec..3b6ca41d 100644 --- a/vendor/github.com/gogo/protobuf/proto/message_set.go +++ b/vendor/github.com/gogo/protobuf/proto/message_set.go @@ -42,6 +42,7 @@ import ( "fmt" "reflect" "sort" + "sync" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. @@ -94,10 +95,7 @@ func (ms *messageSet) find(pb Message) *_MessageSet_Item { } func (ms *messageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false + return ms.find(pb) != nil } func (ms *messageSet) Unmarshal(pb Message) error { @@ -150,46 +148,42 @@ func skipVarint(buf []byte) []byte { // MarshalMessageSet encodes the extension map represented by m in the message set wire format. // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. func MarshalMessageSet(exts interface{}) ([]byte, error) { - var m map[int32]Extension + return marshalMessageSet(exts, false) +} + +// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. +func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { switch exts := exts.(type) { case *XXX_InternalExtensions: - if err := encodeExtensions(exts); err != nil { - return nil, err - } - m, _ = exts.extensionsRead() + var u marshalInfo + siz := u.sizeMessageSet(exts) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, exts, deterministic) + case map[int32]Extension: - if err := encodeExtensionsMap(exts); err != nil { - return nil, err + // This is an old-style extension map. 
+ // Wrap it in a new-style XXX_InternalExtensions. + ie := XXX_InternalExtensions{ + p: &struct { + mu sync.Mutex + extensionMap map[int32]Extension + }{ + extensionMap: exts, + }, } - m = exts + + var u marshalInfo + siz := u.sizeMessageSet(&ie) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, &ie, deterministic) + default: return nil, errors.New("proto: not an extension map") } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. - ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) } // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. func UnmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { @@ -235,7 +229,15 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: - m, _ = exts.extensionsRead() + var mu sync.Locker + m, mu = exts.extensionsRead() + if m != nil { + // Keep the extensions map locked until we're done marshaling to prevent + // races between marshaling and unmarshaling the lazily-{en,de}coded + // values. + mu.Lock() + defer mu.Unlock() + } case map[int32]Extension: m = exts default: @@ -253,15 +255,16 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { for i, id := range ids { ext := m[id] - if i > 0 { - b.WriteByte(',') - } - msd, ok := messageSetMap[id] if !ok { // Unknown type; we can't render it, so skip it. continue } + + if i > 0 && b.Len() > 1 { + b.WriteByte(',') + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) x := ext.value diff --git a/vendor/github.com/gogo/protobuf/proto/message_set_test.go b/vendor/github.com/gogo/protobuf/proto/message_set_test.go deleted file mode 100644 index 353a3ea7..00000000 --- a/vendor/github.com/gogo/protobuf/proto/message_set_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "bytes" - "testing" -) - -func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { - // Check that a repeated message set entry will be concatenated. - in := &messageSet{ - Item: []*_MessageSet_Item{ - {TypeId: Int32(12345), Message: []byte("hoo")}, - {TypeId: Int32(12345), Message: []byte("hah")}, - }, - } - b, err := Marshal(in) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - t.Logf("Marshaled bytes: %q", b) - - var extensions XXX_InternalExtensions - if err := UnmarshalMessageSet(b, &extensions); err != nil { - t.Fatalf("UnmarshalMessageSet: %v", err) - } - ext, ok := extensions.p.extensionMap[12345] - if !ok { - t.Fatalf("Didn't retrieve extension 12345; map is %v", extensions.p.extensionMap) - } - // Skip wire type/field number and length varints. - got := skipVarint(skipVarint(ext.enc)) - if want := []byte("hoohah"); !bytes.Equal(got, want) { - t.Errorf("Combined extension is %q, want %q", got, want) - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go index fb512e2e..b6cad908 100644 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build appengine js +// +build purego appengine js // This file contains an implementation of proto field accesses using package reflect. // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can @@ -38,32 +38,13 @@ package proto import ( - "math" "reflect" + "sync" ) -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} +const unsafeAllowed = false -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by the sequence of field indices // passed to reflect's FieldByIndex. 
type field []int @@ -76,409 +57,301 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. var invalidField = field(nil) +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { return f != nil } -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value } -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} } -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} } -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) +func (p pointer) isNil() bool { + return p.v.IsNil() } -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) } -// String returns the address of a *string field in the struct. 
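// The reflect-based pointer above identifies a field by its index path and
// reaches it with FieldByIndex(...).Addr(). A minimal standalone sketch of that
// technique (not the package's internal API; the type and field names here are
// made up for illustration):
package main

import (
	"fmt"
	"reflect"
)

type person struct {
	Name string
	Age  int
}

// fieldAddr returns an addressable reflect.Value pointing at the field of
// *structPtr selected by the index path, mirroring what pointer.offset does
// for the purego build.
func fieldAddr(structPtr interface{}, index []int) reflect.Value {
	return reflect.ValueOf(structPtr).Elem().FieldByIndex(index).Addr()
}

func main() {
	p := &person{Name: "x"}
	agePtr := fieldAddr(p, []int{1}).Interface().(*int) // index 1 selects Age
	*agePtr = 30
	fmt.Println(p.Age) // 30
}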
-func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) } - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) } - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) } -// Extensions returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return structPointer_ifield(p, f).(*XXX_InternalExtensions) -} +var int32ptr = reflect.TypeOf((*int32)(nil)) -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) } -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) } - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) } - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s } -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} +// setInt32Slice copies []int32 into p as a new slice. 
+// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. + slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) } - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) } -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) } - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) } - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) } - -// Set sets p to point at a newly allocated word with bits set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) } - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) } - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. 
-func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) } - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) } - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) } - -// Get gets the bits pointed at by p, as a uint32. -func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) } - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) } - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. -type word32Slice struct { - v reflect.Value +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) } - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) } - -func (p word32Slice) Len() int { - return p.v.Len() +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) } - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) } - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) } - -// word64 is like word32 but for 64-bit values. 
-type word64 struct { - v reflect.Value +func (p pointer) toString() *string { + return p.v.Interface().(*string) } - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) } - -func word64_IsNil(p word64) bool { - return p.v.IsNil() +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) } - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) } - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) } -// word64Val is like word32Val but for 64-bit values. -type word64Val struct { - v reflect.Value +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s } -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) return } - panic("unreachable") + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) } -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. 
+func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} } - panic("unreachable") + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? + return p.v } -type word64Slice struct { - v reflect.Value +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p } - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } - -func (p word64Slice) Len() int { - return p.v.Len() +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p } - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go index 1763a5f2..7ffd3c29 100644 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -1,6 +1,6 @@ // Protocol Buffers for Go with Gadgets // -// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// Copyright (c) 2018, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -26,7 +26,11 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build appengine js +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. 
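// The atomicLoad*/atomicStore* helpers above fall back to a single package-level
// mutex because the purego/appengine/js build cannot use sync/atomic pointer
// operations without package unsafe. A minimal sketch of the same pattern with
// a made-up payload type:
package main

import (
	"fmt"
	"sync"
)

type info struct{ n int }

var mu sync.Mutex // serializes every load and store; slower, but unsafe-free

func loadInfo(p **info) *info {
	mu.Lock()
	defer mu.Unlock()
	return *p
}

func storeInfo(p **info, v *info) {
	mu.Lock()
	defer mu.Unlock()
	*p = v
}

func main() {
	var cached *info
	storeInfo(&cached, &info{n: 1})
	fmt.Println(loadInfo(&cached).n) // 1
}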
package proto @@ -34,52 +38,22 @@ import ( "reflect" ) -func structPointer_FieldPointer(p structPointer, f field) structPointer { - panic("not implemented") -} - -func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { - panic("not implemented") -} - -func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { - panic("not implemented") -} - -func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { - panic("not implemented") -} - -func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { - panic("not implemented") -} +// TODO: untested, so probably incorrect. -func structPointer_Add(p structPointer, size field) structPointer { - panic("not implemented") +func (p pointer) getRef() pointer { + return pointer{v: p.v.Addr()} } -func structPointer_Len(p structPointer, f field) int { - panic("not implemented") -} - -func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { - panic("not implemented") -} - -func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { - panic("not implemented") -} - -func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { - panic("not implemented") -} - -type structRefSlice struct{} - -func (v *structRefSlice) Len() int { - panic("not implemented") +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) } -func (v *structRefSlice) Index(i int) structPointer { - panic("not implemented") +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice } diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go index 6b5567d4..d55a335d 100644 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine,!js +// +build !purego,!appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. @@ -37,38 +37,13 @@ package proto import ( "reflect" + "sync/atomic" "unsafe" ) -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. 
-func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} +const unsafeAllowed = true -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by its byte offset from the start of the struct. type field uintptr @@ -80,191 +55,254 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. const invalidField = ^field(0) +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { - return f != ^field(0) + return f != invalidField } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. +type pointer struct { + p unsafe.Pointer } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} } -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. 
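// toPointer and toAddrPointer above read the data word straight out of an
// interface value. A small sketch of why that works on the gc runtime, where an
// interface value is a two-word (type, data) pair; this relies on an
// implementation detail and is shown only to illustrate the "super-tricky"
// comment, not as portable code:
package main

import (
	"fmt"
	"unsafe"
)

type msg struct{ a int }

func main() {
	m := &msg{a: 7}
	var i interface{} = m
	// For a pointer-typed value the data word is the pointer itself.
	data := (*[2]unsafe.Pointer)(unsafe.Pointer(&i))[1]
	fmt.Println(data == unsafe.Pointer(m)) // true
}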
+func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} } -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) isNil() bool { + return p.p == nil } -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) } - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) } - -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) } - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) } -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) } - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v } -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) } -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. 
+// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v } -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? +func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) } -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) } - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) } - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) } - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) } - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) } - -// A word32Slice is a slice of 32-bit values. -type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) } - -// word64 is like word32 but for 64-bit values. 
-type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] +func (p pointer) toBool() *bool { + return (*bool)(p.p) } - -func word64_IsNil(p word64) bool { - return *p == nil +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) } - -func word64_Get(p word64) uint64 { - return **p +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) } -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) } -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} } -func word64Val_Get(p word64Val) uint64 { - return *p +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) } -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. 
+ return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) } diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go index f156a29f..b354101b 100644 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -1,6 +1,6 @@ // Protocol Buffers for Go with Gadgets // -// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// Copyright (c) 2018, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -26,7 +26,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine,!js +// +build !purego !appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. 
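// On the default (unsafe) build, the same atomicLoad*/atomicStore* helpers use
// sync/atomic on the pointer cell directly instead of a mutex. A minimal sketch
// of that pattern with a made-up payload type:
package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type info struct{ n int }

// loadInfo atomically reads *p by treating the **info cell as an *unsafe.Pointer.
func loadInfo(p **info) *info {
	return (*info)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}

// storeInfo atomically writes v into *p.
func storeInfo(p **info, v *info) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}

func main() {
	var cached *info
	storeInfo(&cached, &info{n: 42})
	fmt.Println(loadInfo(&cached).n) // 42
}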
@@ -37,92 +37,20 @@ import ( "unsafe" ) -func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { - point := unsafe.Pointer(uintptr(p) + uintptr(f)) - r := reflect.NewAt(t, point) - return r.Interface() +func (p pointer) getRef() pointer { + return pointer{p: (unsafe.Pointer)(&p.p)} } -func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { - point := unsafe.Pointer(uintptr(p) + uintptr(f)) - r := reflect.NewAt(t, point) - if r.Elem().IsNil() { - return nil - } - return r.Elem().Interface() +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) } -func copyUintPtr(oldptr, newptr uintptr, size int) { - oldbytes := make([]byte, 0) - oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes)) - oldslice.Data = oldptr - oldslice.Len = size - oldslice.Cap = size - newbytes := make([]byte, 0) - newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes)) - newslice.Data = newptr - newslice.Len = size - newslice.Cap = size - copy(newbytes, oldbytes) -} - -func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { - copyUintPtr(uintptr(oldptr), uintptr(newptr), size) -} - -func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { - size := typ.Elem().Size() - - oldHeader := structPointer_GetSliceHeader(base, f) - oldSlice := reflect.NewAt(typ, unsafe.Pointer(oldHeader)).Elem() - newLen := oldHeader.Len + 1 - newSlice := reflect.MakeSlice(typ, newLen, newLen) - reflect.Copy(newSlice, oldSlice) - bas := toStructPointer(newSlice) - oldHeader.Data = uintptr(bas) - oldHeader.Len = newLen - oldHeader.Cap = newLen - - return structPointer(unsafe.Pointer(uintptr(unsafe.Pointer(bas)) + uintptr(uintptr(newLen-1)*size))) -} - -func structPointer_FieldPointer(p structPointer, f field) structPointer { - return structPointer(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { - return structPointer((*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { - return (*reflect.SliceHeader)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_Add(p structPointer, size field) structPointer { - return structPointer(unsafe.Pointer(uintptr(p) + uintptr(size))) -} - -func structPointer_Len(p structPointer, f field) int { - return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f)))) -} - -func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { - return &structRefSlice{p: p, f: f, size: size} -} - -// A structRefSlice represents a slice of structs (themselves submessages or groups). 
-type structRefSlice struct { - p structPointer - f field - size uintptr -} - -func (v *structRefSlice) Len() int { - return structPointer_Len(v.p, v.f) -} - -func (v *structRefSlice) Index(i int) structPointer { - ss := structPointer_GetStructPointer(v.p, v.f) - ss1 := structPointer_GetRefStructPointer(ss, 0) - return structPointer_Add(ss1, field(uintptr(i)*v.size)) +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice } diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go index 2a69e886..7a5e28ef 100644 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -63,42 +63,6 @@ const ( WireFixed32 = 5 ) -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. -type oneofSizer func(Message) int - // tagMap is an optimization over map[int]int for typical protocol buffer // use-cases. Encoded protocol buffers are often in tag order with small tag // numbers. @@ -145,13 +109,6 @@ type StructProperties struct { decoderTags tagMap // map from proto tag to struct field number decoderOrigNames map[string]int // map from original name to struct field number order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type // OneofTypes contains information about the oneof fields in this message. // It is keyed by the original name of a field. 
@@ -197,36 +154,19 @@ type Properties struct { StdTime bool StdDuration bool - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sstype reflect.Type // set for slices of structs types only - ctype reflect.Type // set for custom types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool + stype reflect.Type // set for struct types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only mtype reflect.Type // set for map types only mkeyprop *Properties // set for map types only mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder } // String formats the properties in the protobuf struct field tag style. func (p *Properties) String() string { s := p.Wire - s = "," + s += "," s += strconv.Itoa(p.Tag) if p.Required { s += ",req" @@ -272,29 +212,14 @@ func (p *Properties) Parse(s string) { switch p.Wire { case "varint": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint case "fixed32": p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 case "fixed64": p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 case "zigzag32": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 case "zigzag64": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 case "bytes", "group": p.WireType = WireBytes // no numeric converter for non-numeric types @@ -309,6 +234,7 @@ func (p *Properties) Parse(s string) { return } +outer: for i := 2; i < len(fields); i++ { f := fields[i] switch { @@ -336,7 +262,7 @@ func (p *Properties) Parse(s string) { if i+1 < len(fields) { // Commas aren't escaped, and def is always last. p.Default += "," + strings.Join(fields[i+1:], ",") - break + break outer } case strings.HasPrefix(f, "embedded="): p.OrigName = strings.Split(f, "=")[1] @@ -352,292 +278,43 @@ func (p *Properties) Parse(s string) { } } -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() -// Initialize the fields for encoding and decoding. -func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil +// setFieldProps initializes the field properties for submessages and maps. 
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { isMap := typ.Kind() == reflect.Map if len(p.CustomType) > 0 && !isMap { - p.setCustomEncAndDec(typ) + p.ctype = typ p.setTag(lockGetProp) return } if p.StdTime && !isMap { - p.setTimeEncAndDec(typ) p.setTag(lockGetProp) return } if p.StdDuration && !isMap { - p.setDurationEncAndDec(typ) p.setTag(lockGetProp) return } switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - } else { - p.enc = (*Buffer).enc_ref_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_ref_bool - } - case reflect.Int32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - } else { - p.enc = (*Buffer).enc_ref_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_ref_int32 - } - case reflect.Uint32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - } else { - p.enc = (*Buffer).enc_ref_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_ref_uint32 - } - case reflect.Int64, reflect.Uint64: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - } else { - p.enc = (*Buffer).enc_ref_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_ref_int64 - } - case reflect.Float32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - } else { - p.enc = (*Buffer).enc_ref_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_ref_uint32 - } - case reflect.Float64: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - } else { - p.enc = (*Buffer).enc_ref_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_ref_int64 - } - case reflect.String: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - } else { - p.enc = (*Buffer).enc_ref_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_ref_string - } case reflect.Struct: p.stype = typ - p.isMarshaler = isMarshaler(typ) - p.isUnmarshaler = isUnmarshaler(typ) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_ref_struct_message - p.dec = (*Buffer).dec_ref_struct_message - p.size = size_ref_struct_message - } else { - fmt.Fprintf(os.Stderr, "proto: no coders for struct %T\n", typ) - } - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size 
= size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: + if t1.Elem().Kind() == reflect.Struct { p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } } - case reflect.Slice: switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.dec = (*Buffer).dec_slice_byte - if p.proto3 { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } else { - p.enc = (*Buffer).enc_slice_byte - p.size = size_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string case reflect.Ptr: switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case 
reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte + p.stype = t3 } case reflect.Struct: - p.setSliceOfNonPointerStructs(t1) + p.stype = t2 } case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map p.mtype = t1 p.mkeyprop = &Properties{} @@ -659,20 +336,6 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock } func (p *Properties) setTag(lockGetProp bool) { - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - if p.stype != nil { if lockGetProp { p.sprop = GetProperties(p.stype) @@ -683,20 +346,9 @@ func (p *Properties) setTag(lockGetProp bool) { } var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() ) -// isMarshaler reports whether type t implements Marshaler. -func isMarshaler(t reflect.Type) bool { - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - return t.Implements(unmarshalerType) -} - // Init populates the properties from a protocol buffer struct tag. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { p.init(typ, name, tag, f, true) @@ -706,14 +358,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF // "bytes,49,opt,def=hello!" 
p.Name = name p.OrigName = name - if f != nil { - p.field = toField(f) - } if tag == "" { return } p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) + p.setFieldProps(typ, f, lockGetProp) } var ( @@ -763,10 +412,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { propertiesMap[t] = prop // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || - reflect.PtrTo(t).Implements(extendableProtoV1Type) || - reflect.PtrTo(t).Implements(extendableBytesType) - prop.unrecField = invalidField prop.Prop = make([]*Properties, t.NumField()) prop.order = make([]int, t.NumField()) @@ -777,23 +422,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { name := f.Name p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - if f.Name == "XXX_InternalExtensions" { // special case - p.enc = (*Buffer).enc_exts - p.dec = nil // not needed - p.size = size_exts - } else if f.Name == "XXX_extensions" { // special case - if len(f.Tag.Get("protobuf")) > 0 { - p.enc = (*Buffer).enc_ext_slice_byte - p.dec = nil // not needed - p.size = size_ext_slice_byte - } else { - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } - } else if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } oneof := f.Tag.Get("protobuf_oneof") // special case if oneof != "" { isOneofMessage = true @@ -809,9 +437,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } print("\n") } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } } // Re-order prop.order. @@ -822,8 +447,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t + _, _, _, oots = om.XXX_OneofFuncs() // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) @@ -873,30 +497,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { return prop } -// Return the Properties object for the x[0]'th field of the structure. -func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - // A global registry of enum types. // The generated code will register the generated maps by calling RegisterEnum. @@ -925,20 +525,42 @@ func EnumValueMap(enumType string) map[string]int32 { // A registry of all linked message types. // The string is a fully-qualified proto name ("pkg.Message"). 
var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) ) // RegisterType is called from generated code and maps from the fully qualified // proto name to the type (pointer to struct) of the protocol buffer. func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { + if _, ok := protoTypedNils[name]; ok { // TODO: Some day, make this a panic. log.Printf("proto: duplicate proto type registered: %s", name) return } t := reflect.TypeOf(x) - protoTypes[name] = t + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t revProtoTypes[t] = name } @@ -954,7 +576,14 @@ func MessageName(x Message) string { } // MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} // A registry of all linked proto files. var ( diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go index b6b7176c..40ea3dd9 100644 --- a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -1,6 +1,6 @@ // Protocol Buffers for Go with Gadgets // -// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -29,83 +29,8 @@ package proto import ( - "fmt" - "os" "reflect" ) -func (p *Properties) setCustomEncAndDec(typ reflect.Type) { - p.ctype = typ - if p.Repeated { - p.enc = (*Buffer).enc_custom_slice_bytes - p.dec = (*Buffer).dec_custom_slice_bytes - p.size = size_custom_slice_bytes - } else if typ.Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_custom_bytes - p.dec = (*Buffer).dec_custom_bytes - p.size = size_custom_bytes - } else { - p.enc = (*Buffer).enc_custom_ref_bytes - p.dec = (*Buffer).dec_custom_ref_bytes - p.size = size_custom_ref_bytes - } -} - -func (p *Properties) setDurationEncAndDec(typ reflect.Type) { - if p.Repeated { - if typ.Elem().Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_slice_duration - p.dec = (*Buffer).dec_slice_duration - p.size = size_slice_duration - } else { - p.enc = (*Buffer).enc_slice_ref_duration - p.dec = (*Buffer).dec_slice_ref_duration - p.size = size_slice_ref_duration - } - } else if typ.Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_duration - p.dec = (*Buffer).dec_duration - p.size = size_duration - } else { - p.enc = (*Buffer).enc_ref_duration - p.dec = (*Buffer).dec_ref_duration - p.size = size_ref_duration - } -} - -func (p *Properties) setTimeEncAndDec(typ reflect.Type) { - if p.Repeated { - if typ.Elem().Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_slice_time - p.dec = (*Buffer).dec_slice_time - p.size = size_slice_time - } else { - p.enc = (*Buffer).enc_slice_ref_time - p.dec = (*Buffer).dec_slice_ref_time - p.size = size_slice_ref_time - } - } else if typ.Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_time - p.dec = (*Buffer).dec_time - p.size = size_time - } else { - p.enc = (*Buffer).enc_ref_time - p.dec = (*Buffer).dec_ref_time - p.size = size_ref_time - } - -} - -func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) { - t2 := typ.Elem() - p.sstype = typ - p.stype = t2 - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - p.enc = (*Buffer).enc_slice_ref_struct_message - p.dec = (*Buffer).dec_slice_ref_struct_message - p.size = size_slice_ref_struct_message - if p.Wire != "bytes" { - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T \n", typ, t2) - } -} +var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() +var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/proto3_test.go b/vendor/github.com/gogo/protobuf/proto/proto3_test.go deleted file mode 100644 index 75b66c17..00000000 --- a/vendor/github.com/gogo/protobuf/proto/proto3_test.go +++ /dev/null @@ -1,135 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "testing" - - "github.com/gogo/protobuf/proto" - pb "github.com/gogo/protobuf/proto/proto3_proto" - tpb "github.com/gogo/protobuf/proto/testdata" -) - -func TestProto3ZeroValues(t *testing.T) { - tests := []struct { - desc string - m proto.Message - }{ - {"zero message", &pb.Message{}}, - {"empty bytes field", &pb.Message{Data: []byte{}}}, - } - for _, test := range tests { - b, err := proto.Marshal(test.m) - if err != nil { - t.Errorf("%s: proto.Marshal: %v", test.desc, err) - continue - } - if len(b) > 0 { - t.Errorf("%s: Encoding is non-empty: %q", test.desc, b) - } - } -} - -func TestRoundTripProto3(t *testing.T) { - m := &pb.Message{ - Name: "David", // (2 | 1<<3): 0x0a 0x05 "David" - Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01 - HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01 - Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto" - ResultCount: 47, // (0 | 7<<3): 0x38 0x2f - TrueScotsman: true, // (0 | 8<<3): 0x40 0x01 - Score: 8.1, // (5 | 9<<3): 0x4d <8.1> - - Key: []uint64{1, 0xdeadbeef}, - Nested: &pb.Nested{ - Bunny: "Monty", - }, - } - t.Logf(" m: %v", m) - - b, err := proto.Marshal(m) - if err != nil { - t.Fatalf("proto.Marshal: %v", err) - } - t.Logf(" b: %q", b) - - m2 := new(pb.Message) - if err := proto.Unmarshal(b, m2); err != nil { - t.Fatalf("proto.Unmarshal: %v", err) - } - t.Logf("m2: %v", m2) - - if !proto.Equal(m, m2) { - t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2) - } -} - -func TestGettersForBasicTypesExist(t *testing.T) { - var m pb.Message - if got := m.GetNested().GetBunny(); got != "" { - t.Errorf("m.GetNested().GetBunny() = %q, want empty string", got) - } - if got := m.GetNested().GetCute(); got { - t.Errorf("m.GetNested().GetCute() = %t, want false", got) - } -} - -func TestProto3SetDefaults(t *testing.T) { - in := &pb.Message{ - Terrain: map[string]*pb.Nested{ - "meadow": new(pb.Nested), - }, - Proto2Field: new(tpb.SubDefaults), - Proto2Value: map[string]*tpb.SubDefaults{ - "badlands": new(tpb.SubDefaults), - }, - } - - got := proto.Clone(in).(*pb.Message) - proto.SetDefaults(got) - - // There are no defaults in proto3. Everything should be the zero value, but - // we need to remember to set defaults for nested proto2 messages. 
- want := &pb.Message{ - Terrain: map[string]*pb.Nested{ - "meadow": new(pb.Nested), - }, - Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)}, - Proto2Value: map[string]*tpb.SubDefaults{ - "badlands": {N: proto.Int64(7)}, - }, - } - - if !proto.Equal(got, want) { - t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want) - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/size_test.go b/vendor/github.com/gogo/protobuf/proto/size_test.go deleted file mode 100644 index 0a6c1772..00000000 --- a/vendor/github.com/gogo/protobuf/proto/size_test.go +++ /dev/null @@ -1,164 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "log" - "strings" - "testing" - - . "github.com/gogo/protobuf/proto" - proto3pb "github.com/gogo/protobuf/proto/proto3_proto" - pb "github.com/gogo/protobuf/proto/testdata" -) - -var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} - -// messageWithExtension2 is in equal_test.go. -var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} - -func init() { - if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { - log.Panicf("SetExtension: %v", err) - } - if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { - log.Panicf("SetExtension: %v", err) - } - - // Force messageWithExtension3 to have the extension encoded. - Marshal(messageWithExtension3) - -} - -var SizeTests = []struct { - desc string - pb Message -}{ - {"empty", &pb.OtherMessage{}}, - // Basic types. 
- {"bool", &pb.Defaults{F_Bool: Bool(true)}}, - {"int32", &pb.Defaults{F_Int32: Int32(12)}}, - {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}}, - {"small int64", &pb.Defaults{F_Int64: Int64(1)}}, - {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, - {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}}, - {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, - {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, - {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, - {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, - {"float", &pb.Defaults{F_Float: Float32(12.6)}}, - {"double", &pb.Defaults{F_Double: Float64(13.9)}}, - {"string", &pb.Defaults{F_String: String("niles")}}, - {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, - {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, - {"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, - {"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, - {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, - // Repeated. - {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, - {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, - {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, - {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}}, - {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, - {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ - // Need enough large numbers to verify that the header is counting the number of bytes - // for the field, not the number of elements. - 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, - 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, - }}}, - {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, - {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, - // Nested. - {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, - {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, - // Other things. 
- {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, - {"extension (unencoded)", messageWithExtension1}, - {"extension (encoded)", messageWithExtension3}, - // proto3 message - {"proto3 empty", &proto3pb.Message{}}, - {"proto3 bool", &proto3pb.Message{TrueScotsman: true}}, - {"proto3 int64", &proto3pb.Message{ResultCount: 1}}, - {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}}, - {"proto3 float", &proto3pb.Message{Score: 12.6}}, - {"proto3 string", &proto3pb.Message{Name: "Snezana"}}, - {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}}, - {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}}, - {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, - {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: {}}}}, - - {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}}, - {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: {F: Float64(2.0)}}}}, - {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}}, - {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: {}}}}, - - {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}}, - {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}}, - {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}}, - - {"oneof not set", &pb.Oneof{}}, - {"oneof bool", &pb.Oneof{Union: &pb.Oneof_F_Bool{F_Bool: true}}}, - {"oneof zero int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{F_Int32: 0}}}, - {"oneof big int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{F_Int32: 1 << 20}}}, - {"oneof int64", &pb.Oneof{Union: &pb.Oneof_F_Int64{F_Int64: 42}}}, - {"oneof fixed32", &pb.Oneof{Union: &pb.Oneof_F_Fixed32{F_Fixed32: 43}}}, - {"oneof fixed64", &pb.Oneof{Union: &pb.Oneof_F_Fixed64{F_Fixed64: 44}}}, - {"oneof uint32", &pb.Oneof{Union: &pb.Oneof_F_Uint32{F_Uint32: 45}}}, - {"oneof uint64", &pb.Oneof{Union: &pb.Oneof_F_Uint64{F_Uint64: 46}}}, - {"oneof float", &pb.Oneof{Union: &pb.Oneof_F_Float{F_Float: 47.1}}}, - {"oneof double", &pb.Oneof{Union: &pb.Oneof_F_Double{F_Double: 48.9}}}, - {"oneof string", &pb.Oneof{Union: &pb.Oneof_F_String{F_String: "Rhythmic Fman"}}}, - {"oneof bytes", &pb.Oneof{Union: &pb.Oneof_F_Bytes{F_Bytes: []byte("let go")}}}, - {"oneof sint32", &pb.Oneof{Union: &pb.Oneof_F_Sint32{F_Sint32: 50}}}, - {"oneof sint64", &pb.Oneof{Union: &pb.Oneof_F_Sint64{F_Sint64: 51}}}, - {"oneof enum", &pb.Oneof{Union: &pb.Oneof_F_Enum{F_Enum: pb.MyMessage_BLUE}}}, - {"message for oneof", &pb.GoTestField{Label: String("k"), Type: String("v")}}, - {"oneof message", &pb.Oneof{Union: &pb.Oneof_F_Message{F_Message: &pb.GoTestField{Label: String("k"), Type: String("v")}}}}, - {"oneof group", &pb.Oneof{Union: &pb.Oneof_FGroup{FGroup: &pb.Oneof_F_Group{X: Int32(52)}}}}, - {"oneof largest tag", &pb.Oneof{Union: &pb.Oneof_F_Largest_Tag{F_Largest_Tag: 1}}}, - {"multiple oneofs", &pb.Oneof{Union: &pb.Oneof_F_Int32{F_Int32: 1}, Tormato: &pb.Oneof_Value{Value: 2}}}, -} - -func TestSize(t *testing.T) { - for _, tc := range SizeTests { - size := Size(tc.pb) - b, err := Marshal(tc.pb) - if err != nil { - t.Errorf("%v: Marshal failed: %v", tc.desc, err) - continue - } - if size != len(b) { - t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b)) - 
t.Logf("%v: bytes: %#v", tc.desc, b) - } - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go new file mode 100644 index 00000000..255e7b50 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go @@ -0,0 +1,2799 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, computes the size of +// the encoded data. +type sizer func(pointer, int) int + +// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), +// marshals the field to the end of the slice, returns the slice and error (if any). +type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) + +// marshalInfo is the information used for marshaling a message. +type marshalInfo struct { + typ reflect.Type + fields []*marshalFieldInfo + unrecognized field // offset of XXX_unrecognized + extensions field // offset of XXX_InternalExtensions + v1extensions field // offset of XXX_extensions + sizecache field // offset of XXX_sizecache + initialized int32 // 0 -- only typ is set, 1 -- fully initialized + messageset bool // uses message set wire format + hasmarshaler bool // has custom marshaler + sync.RWMutex // protect extElems map, also for initialization + extElems map[int32]*marshalElemInfo // info of extension elements + + hassizer bool // has custom sizer + hasprotosizer bool // has custom protosizer + + bytesExtensions field // offset of XXX_extensions where the field type is []byte +} + +// marshalFieldInfo is the information used for marshaling a field of a message. 
+type marshalFieldInfo struct { + field field + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isPointer bool + required bool // field is required + name string // name of the field, for error reporting + oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements +} + +// marshalElemInfo is the information used for marshaling an extension or oneof element. +type marshalElemInfo struct { + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) +} + +var ( + marshalInfoMap = map[reflect.Type]*marshalInfo{} + marshalInfoLock sync.Mutex +) + +// getMarshalInfo returns the information to marshal a given type of message. +// The info it returns may not necessarily initialized. +// t is the type of the message (NOT the pointer to it). +func getMarshalInfo(t reflect.Type) *marshalInfo { + marshalInfoLock.Lock() + u, ok := marshalInfoMap[t] + if !ok { + u = &marshalInfo{typ: t} + marshalInfoMap[t] = u + } + marshalInfoLock.Unlock() + return u +} + +// Size is the entry point from generated code, +// and should be ONLY called by generated code. +// It computes the size of encoded data of msg. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Size(msg Message) int { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return 0 + } + return u.size(ptr) +} + +// Marshal is the entry point from generated code, +// and should be ONLY called by generated code. +// It marshals msg to the end of b. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return b, ErrNil + } + return u.marshal(b, ptr, deterministic) +} + +func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { + // u := a.marshal, but atomically. + // We use an atomic here to ensure memory consistency. + u := atomicLoadMarshalInfo(&a.marshal) + if u == nil { + // Get marshal information from type of message. + t := reflect.ValueOf(msg).Type() + if t.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) + } + u = getMarshalInfo(t.Elem()) + // Store it in the cache for later users. + // a.marshal = u, but atomically. + atomicStoreMarshalInfo(&a.marshal, u) + } + return u +} + +// size is the main function to compute the size of the encoded data of a message. +// ptr is the pointer to the message. +func (u *marshalInfo) size(ptr pointer) int { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if u.hasmarshaler { + // Uses the message's Size method if available + if u.hassizer { + s := ptr.asPointerTo(u.typ).Interface().(Sizer) + return s.Size() + } + // Uses the message's ProtoSize method if available + if u.hasprotosizer { + s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) + return s.ProtoSize() + } + + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + n += len(s) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), +// fall back to compute the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, map is marshaled in deterministic order. +func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + if deterministic { + return nil, errors.New("proto: deterministic not supported by the Marshal method of " + u.typ.String()) + } + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errreq error + // The old marshaler encodes extensions at beginning. + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + b = append(b, s...) + } + for _, f := range u.fields { + if f.required && errreq == nil { + if ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. 
+ errreq = &RequiredNotSetError{f.name} + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errreq +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.bytesExtensions = invalidField + u.sizecache = invalidField + isOneofMessage := false + + if reflect.PtrTo(t).Implements(sizerType) { + u.hassizer = true + } + if reflect.PtrTo(t).Implements(protosizerType) { + u.hasprotosizer = true + } + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Tag.Get("protobuf_oneof") != "" { + isOneofMessage = true + } + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + if f.Type.Kind() == reflect.Map { + u.v1extensions = toField(&f) + } else { + u.bytesExtensions = toField(&f) + } + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // get oneof implementers + var oneofImplementers []interface{} + // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage { + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. 
+ sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" + tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + } + } +} + +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. 
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. +func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + ctype := false + isTime := false + isDuration := false + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + if strings.HasPrefix(tags[i], "customtype=") { + ctype = true + } + if tags[i] == "stdtime" { + isTime = true + } + if tags[i] == "stdduration" { + isDuration = true + } + } + if !proto3 && !pointer && !slice { + nozero = false + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + if pointer { + return makeCustomPtrMarshaler(getMarshalInfo(t)) + } + return makeCustomMarshaler(getMarshalInfo(t)) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeTimePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeTimePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeTimeSliceMarshaler(getMarshalInfo(t)) + } + return makeTimeMarshaler(getMarshalInfo(t)) + } + + if isDuration { + if pointer { + if slice { + return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationPtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeDurationSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationMarshaler(getMarshalInfo(t)) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + 
return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, 
appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. + return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if pointer { + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } else { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageRefMarshaler(getMarshalInfo(t)) + } + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} 
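(Editor's aside for reviewers — not part of the vendored upstream file.) Every sizer above follows one of two shapes: an unpacked repeated field pays the field key once per element, while a packed field pays a single key, then a varint byte-length prefix, then the raw payload. A minimal, self-contained sketch of that arithmetic, assuming only the standard protobuf varint rule of 7 bits per byte; the tag number and element count here are made up for the example:

package main

import "fmt"

// varintSize mirrors what proto.SizeVarint computes: the number of bytes
// needed to encode v as a base-128 varint.
func varintSize(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

func main() {
	const tag = 5   // hypothetical field number
	const elems = 3 // hypothetical number of fixed32 elements

	// Size of the encoded key, analogous to marshalFieldInfo.tagsize.
	tagsize := varintSize(uint64(tag) << 3)

	// Packed: one key + varint length prefix + 4 bytes per element,
	// matching the formula in sizeFixed32PackedSlice above.
	packed := 4*elems + varintSize(uint64(4*elems)) + tagsize

	// Unpacked: the key is repeated for every element, matching sizeFixed32Slice.
	unpacked := (4 + tagsize) * elems

	fmt.Println(packed, unpacked) // prints: 14 15
}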
+func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 
0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func 
sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} +func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. 
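+	// Editor's illustration (not upstream code): appendVarint writes v seven
+	// bits at a time, least-significant group first, setting the high bit of
+	// every byte except the last as a continuation flag. For example,
+	// v = 300 = 0b1_0010_1100 encodes as two bytes:
+	//   0xAC (0b1010_1100): low 7 bits of v with the continuation bit set
+	//   0x02 (0b0000_0010): remaining bits, continuation bit clear
+	// which is exactly the `case v < 1<<14` branch below. SizeVarint and the
+	// packed-slice sizers above rely on the same 7-bits-per-byte rule.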
+ switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, 
nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() 
+ if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + 
return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. 
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err, errreq error + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, errreq + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. +func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err, errreq error + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, errreq + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. 
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + tags := strings.Split(f.Tag.Get("protobuf"), ",") + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + for _, t := range tags { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + } + if t == "stdduration" { + valTags = append(valTags, t) + } + } + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if err != nil { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != nil && err != ErrNil { // allow nil value in map + return b, err + } + } + return b, nil + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. 
+ t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. +func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). 
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if err != nil { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if err != nil { + return b, err + } + } + return b, nil +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. 
+ n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ var b []byte + b, err = m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go new file mode 100644 index 00000000..997f57c1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go @@ -0,0 +1,388 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "reflect" + "time" +) + +// makeMessageRefMarshaler differs a bit from makeMessageMarshaler +// It marshal a message T instead of a *T +func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + siz := u.size(ptr) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + b = appendVarint(b, wiretag) + siz := u.cachedsize(ptr) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, ptr, deterministic) + } +} + +// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler +// It marshals a slice of messages []T instead of []*T +func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + var err, errreq error + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + b = appendVarint(b, wiretag) + siz := u.size(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + + return b, errreq + } +} + +func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go new file mode 100644 index 00000000..f520106e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go @@ -0,0 +1,657 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. 
+ } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? + out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) + } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). 
+ /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) 
+ } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + mergeInfo.merge(dst, src) + } + case isSlice: // E.g., []*pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mergeInfo.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mergeInfo.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go new file mode 100644 index 00000000..910e2dd6 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2048 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. +// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. 
+ err := u.unmarshal(toPointer(&msg), b)
+ return err
+}
+
+type unmarshalInfo struct {
+ typ reflect.Type // type of the protobuf struct
+
+ // 0 = only typ field is initialized
+ // 1 = completely initialized
+ initialized int32
+ lock sync.Mutex // prevents double initialization
+ dense []unmarshalFieldInfo // fields indexed by tag #
+ sparse map[uint64]unmarshalFieldInfo // fields indexed by tag #
+ reqFields []string // names of required fields
+ reqMask uint64 // 1<<len(reqFields)-1 when all required fields are seen
+ unrecognized field // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+ extensions field // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+ oldExtensions field // offset of old-form extensions field (of type map[int]Extension), or invalidField if it does not exist
+ extensionRanges []ExtensionRange // if non-empty, implies extensions field is valid
+ isMessageSet bool // if true, implies extensions field is valid
+ bytesExtensions field // offset of XXX_extensions with type []byte
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+// unmarshalFieldInfo contains the information for unmarshaling a single field of a message.
+type unmarshalFieldInfo struct {
+ // location of the field in the proto message structure.
+ field field
+
+ // function to unmarshal the data for the field.
+ unmarshal unmarshaler
+
+ // if a required field, contains a single set bit at this field's index in the required field list.
+ reqMask uint64
+}
+
+var (
+ unmarshalInfoMap = map[reflect.Type]*unmarshalInfo{}
+ unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// efficiently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+ // It would be correct to return a new unmarshalInfo
+ // unconditionally. We would end up allocating one
+ // per occurrence of that type as a message or submessage.
+ // We use a cache here just to reduce memory usage.
+ unmarshalInfoLock.Lock()
+ defer unmarshalInfoLock.Unlock()
+ u := unmarshalInfoMap[t]
+ if u == nil {
+ u = &unmarshalInfo{typ: t}
+ // Note: we just set the type here. The rest of the fields
+ // will be initialized on first use.
+ unmarshalInfoMap[t] = u
+ }
+ return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to the protobuf message.
+// b is a byte stream to unmarshal into m.
+// This is top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeUnmarshalInfo()
+ }
+ if u.isMessageSet {
+ return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+ }
+ var reqMask uint64 // bitmask of required fields we've seen
+ var rnse *RequiredNotSetError // an instance of a RequiredNotSetError returned by a submessage
+ for len(b) > 0 {
+ // Read tag and wire type.
+ // Special case 1 and 2 byte varints.
+ var x uint64
+ if b[0] < 128 {
+ x = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ }
+ tag := x >> 3
+ wire := int(x) & 7
+
+ // Dispatch on the tag to one of the unmarshal* functions below.
+ var f unmarshalFieldInfo
+ if tag < uint64(len(u.dense)) {
+ f = u.dense[tag]
+ } else {
+ f = u.sparse[tag]
+ }
+ if fn := f.unmarshal; fn != nil {
+ var err error
+ b, err = fn(b, m.offset(f.field), wire)
+ if err == nil {
+ reqMask |= f.reqMask
+ continue
+ }
+ if r, ok := err.(*RequiredNotSetError); ok {
+ // Remember this error, but keep parsing. We need to produce
+ // a full parse even if a required field is missing.
+ rnse = r
+ reqMask |= f.reqMask
+ continue
+ }
+ if err != errInternalBadWireType {
+ return err
+ }
+ // Fragments with bad wire type are treated as unknown fields.
+ }
+
+ // Unknown tag.
+ if !u.unrecognized.IsValid() {
+ // Don't keep unrecognized data; just skip it.
+ var err error
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ // Keep unrecognized data around.
+ // maybe in extensions, maybe in the unrecognized field.
+ z := m.offset(u.unrecognized).toBytes()
+ var emap map[int32]Extension
+ var e Extension
+ for _, r := range u.extensionRanges {
+ if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+ if u.extensions.IsValid() {
+ mp := m.offset(u.extensions).toExtensions()
+ emap = mp.extensionsWrite()
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ if u.oldExtensions.IsValid() {
+ p := m.offset(u.oldExtensions).toOldExtensions()
+ emap = *p
+ if emap == nil {
+ emap = map[int32]Extension{}
+ *p = emap
+ }
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ if u.bytesExtensions.IsValid() {
+ z = m.offset(u.bytesExtensions).toBytes()
+ break
+ }
+ panic("no extensions field available")
+ }
+ }
+ // Use wire type to skip data.
+ var err error
+ b0 := b
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ *z = encodeVarint(*z, tag<<3|uint64(wire))
+ *z = append(*z, b0[:len(b0)-len(b)]...)
+
+ if emap != nil {
+ emap[int32(tag)] = e
+ }
+ }
+ if rnse != nil {
+ // A required field of a submessage/group is missing. Return that error.
+ return rnse
+ }
+ if reqMask != u.reqMask {
+ // A required field of this message is missing.
+ for _, n := range u.reqFields {
+ if reqMask&1 == 0 {
+ return &RequiredNotSetError{n}
+ }
+ reqMask >>= 1
+ }
+ }
+ return nil
+}
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+ u.lock.Lock()
+ defer u.lock.Unlock()
+ if u.initialized != 0 {
+ return
+ }
+ t := u.typ
+ n := t.NumField()
+
+ // Set up the "not found" value for the unrecognized byte buffer.
+ // This is the default for proto3. 
+ u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + u.bytesExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { + u.oldExtensions = toField(&f) + continue + } else if f.Type == reflect.TypeOf(([]byte)(nil)) { + u.bytesExtensions = toField(&f) + continue + } + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask) + } + + // Find any types associated with oneof fields. + // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") + // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler + if fn.IsValid() && len(oneofFields) > 0 { + res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} + for i := res.Len() - 1; i >= 0; i-- { + v := res.Index(i) // interface{} + tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1] + tag, err := strconv.Atoi(tagstr) + if err != nil { + panic("protobuf tag field not an integer: " + tagstr) + } + + // Find the oneof field that this struct implements. 
+ // Might take O(n^2) to process all of the oneofs, but who cares.
+ for _, of := range oneofFields {
+ if tptr.Implements(of.ityp) {
+ // We have found the corresponding interface for this struct.
+ // That lets us know where this struct should be stored
+ // when we encounter it during unmarshaling.
+ unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+ u.setTag(tag, of.field, unmarshal, 0)
+ }
+ }
+ }
+ }
+
+ // Get extension ranges, if any.
+ fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+ if fn.IsValid() {
+ if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() {
+ panic("a message with extensions, but no extensions field in " + t.Name())
+ }
+ u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+ }
+
+ // Explicitly disallow tag 0. This will ensure we flag an error
+ // when decoding a buffer of all zeros. Without this code, we
+ // would decode and skip an all-zero buffer of even length.
+ // [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+ u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+ return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+ }, 0)
+
+ // Set mask for required field check.
+ u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64) {
+ i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask}
+ n := u.typ.NumField()
+ if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+ for len(u.dense) <= tag {
+ u.dense = append(u.dense, unmarshalFieldInfo{})
+ }
+ u.dense[tag] = i
+ return
+ }
+ if u.sparse == nil {
+ u.sparse = map[uint64]unmarshalFieldInfo{}
+ }
+ u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+ if f.Type.Kind() == reflect.Map {
+ return makeUnmarshalMap(f)
+ }
+ return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. 
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + ctype := false + isTime := false + isDuration := false + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + if strings.HasPrefix(tag, "customtype=") { + ctype = true + } + if tag == "stdtime" { + isTime = true + } + if tag == "stdduration" { + isDuration = true + } + } + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) + } + if pointer { + return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalCustom(getUnmarshalInfo(t), name) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTime(getUnmarshalInfo(t), name) + } + + if isDuration { + if pointer { + if slice { + return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDuration(getUnmarshalInfo(t), name) + } + + // We'll never have both pointer and slice for basic types. 
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessage(getUnmarshalInfo(t), name) + } + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 
{ + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. 
+ // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) 
+ s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + tagArray := strings.Split(f.Tag.Get("protobuf"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + for _, t := range tagArray { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + } + if t == "stdduration" { + valTags = append(valTags, t) + } + } + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. 
+ if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if err == nil { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nil + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if err != nil { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nil + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. +func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. 
+// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. +func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) <= 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go new file mode 100644 index 00000000..00d6c7ad --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go @@ -0,0 +1,385 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f // gogo: changed from v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v)
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.New(sub.typ))
+		m := s.Interface().(custom)
+		if err := m.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := reflect.New(sub.typ)
+		c := m.Interface().(custom)
+		if err := c.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		v := valToPointer(m)
+		f.appendRef(v, sub.typ)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+
+		m := f.asPointerTo(sub.typ).Interface().(custom)
+		if err := m.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(t))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&t))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&t))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(t))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&d))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(d))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&d))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(d))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go
index f609d1d4..4f5706dc 100644
--- a/vendor/github.com/gogo/protobuf/proto/text.go
+++ b/vendor/github.com/gogo/protobuf/proto/text.go
@@ -57,7
+57,6 @@ import ( var ( newline = []byte("\n") spaces = []byte(" ") - gtNewline = []byte(">\n") endBraceNewline = []byte("}\n") backslashN = []byte{'\\', 'n'} backslashR = []byte{'\\', 'r'} @@ -177,11 +176,6 @@ func writeName(w *textWriter, props *Properties) error { return nil } -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - func requiresQuotes(u string) bool { // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. for _, ch := range u { @@ -276,6 +270,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { props := sprops.Prop[i] name := st.Field(i).Name + if name == "XXX_NoUnkeyedLiteral" { + continue + } + if strings.HasPrefix(name, "XXX_") { // There are two XXX_ fields: // XXX_unrecognized []byte @@ -447,12 +445,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } if len(props.Enum) > 0 { if err := tm.writeEnum(w, fv, props); err != nil { @@ -475,7 +467,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { pv = reflect.New(sv.Type()) pv.Elem().Set(sv) } - if pv.Type().Implements(extensionRangeType) { + if _, err := extendable(pv.Interface()); err == nil { if err := tm.writeExtensions(w, pv); err != nil { return err } @@ -484,27 +476,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return nil } -// writeRaw writes an uninterpreted raw message. -func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - // writeAny writes an arbitrary field. func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) @@ -605,6 +576,19 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert } } w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. 
+ v = v.Addr() + } if etm, ok := v.Interface().(encoding.TextMarshaler); ok { text, err := etm.MarshalText() if err != nil { @@ -613,8 +597,13 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert if _, err = w.Write(text); err != nil { return err } - } else if err := tm.writeStruct(w, v); err != nil { - return err + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } } w.unindent() if err := w.WriteByte(ket); err != nil { diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go index f1276729..fbb000d3 100644 --- a/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -212,7 +212,6 @@ func (p *textParser) advance() { var ( errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") ) func unquoteC(s string, quote rune) (string, error) { @@ -283,60 +282,47 @@ func unescape(s string) (ch string, tail string, err error) { return "?", s, nil // trigraph workaround case '\'', '"', '\\': return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + case '0', '1', '2', '3', '4', '5', '6', '7': if len(s) < 2 { return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) } - base := 8 - ss := s[:2] + ss := string(r) + s[:2] s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) + i, err := strconv.ParseUint(ss, 8, 8) if err != nil { - return "", "", err + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) } return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': n = 8 } if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) } + ss := s[:n] s = s[n:] - return string(bs), s, nil + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - // Back off the parser by one token. Can only be done between calls to next(). // It makes the next advance() a no-op. 
func (p *textParser) back() { p.backed = true } @@ -734,6 +720,9 @@ func (p *textParser) consumeExtName() (string, error) { if tok.err != nil { return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } } return strings.Join(parts, ""), nil } @@ -983,7 +972,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { return p.readStruct(fv, terminator) case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(x) + fv.SetUint(uint64(x)) return nil } case reflect.Uint64: @@ -1001,13 +990,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { // UnmarshalText returns *RequiredNotSetError. func UnmarshalText(s string, pb Message) error { if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err + return um.UnmarshalText([]byte(s)) } pb.Reset() v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil + return newTextParser(s).readStruct(v.Elem(), "") } diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser_test.go b/vendor/github.com/gogo/protobuf/proto/text_parser_test.go deleted file mode 100644 index 9a3a447c..00000000 --- a/vendor/github.com/gogo/protobuf/proto/text_parser_test.go +++ /dev/null @@ -1,673 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "math" - "reflect" - "testing" - - . "github.com/gogo/protobuf/proto" - proto3pb "github.com/gogo/protobuf/proto/proto3_proto" - . 
"github.com/gogo/protobuf/proto/testdata" -) - -type UnmarshalTextTest struct { - in string - err string // if "", no error expected - out *MyMessage -} - -func buildExtStructTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - SetExtension(msg, E_Ext_More, &Ext{ - Data: String("Hello, world!"), - }) - return UnmarshalTextTest{in: text, out: msg} -} - -func buildExtDataTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - SetExtension(msg, E_Ext_Text, String("Hello, world!")) - SetExtension(msg, E_Ext_Number, Int32(1729)) - return UnmarshalTextTest{in: text, out: msg} -} - -func buildExtRepStringTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { - panic(err) - } - return UnmarshalTextTest{in: text, out: msg} -} - -var unMarshalTextTests = []UnmarshalTextTest{ - // Basic - { - in: " count:42\n name:\"Dave\" ", - out: &MyMessage{ - Count: Int32(42), - Name: String("Dave"), - }, - }, - - // Empty quoted string - { - in: `count:42 name:""`, - out: &MyMessage{ - Count: Int32(42), - Name: String(""), - }, - }, - - // Quoted string concatenation with double quotes - { - in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`, - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - }, - }, - - // Quoted string concatenation with single quotes - { - in: "count:42 name: 'My name is '\n'elsewhere'", - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - }, - }, - - // Quoted string concatenations with mixed quotes - { - in: "count:42 name: 'My name is '\n\"elsewhere\"", - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - }, - }, - { - in: "count:42 name: \"My name is \"\n'elsewhere'", - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - }, - }, - - // Quoted string with escaped apostrophe - { - in: `count:42 name: "HOLIDAY - New Year\'s Day"`, - out: &MyMessage{ - Count: Int32(42), - Name: String("HOLIDAY - New Year's Day"), - }, - }, - - // Quoted string with single quote - { - in: `count:42 name: 'Roger "The Ramster" Ramjet'`, - out: &MyMessage{ - Count: Int32(42), - Name: String(`Roger "The Ramster" Ramjet`), - }, - }, - - // Quoted string with all the accepted special characters from the C++ test - { - in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"", - out: &MyMessage{ - Count: Int32(42), - Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"), - }, - }, - - // Quoted string with quoted backslash - { - in: `count:42 name: "\\'xyz"`, - out: &MyMessage{ - Count: Int32(42), - Name: String(`\'xyz`), - }, - }, - - // Quoted string with UTF-8 bytes. 
- { - in: "count:42 name: '\303\277\302\201\xAB'", - out: &MyMessage{ - Count: Int32(42), - Name: String("\303\277\302\201\xAB"), - }, - }, - - // Bad quoted string - { - in: `inner: < host: "\0" >` + "\n", - err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`, - }, - - // Number too large for int64 - { - in: "count: 1 others { key: 123456789012345678901 }", - err: "line 1.23: invalid int64: 123456789012345678901", - }, - - // Number too large for int32 - { - in: "count: 1234567890123", - err: "line 1.7: invalid int32: 1234567890123", - }, - - // Number in hexadecimal - { - in: "count: 0x2beef", - out: &MyMessage{ - Count: Int32(0x2beef), - }, - }, - - // Number in octal - { - in: "count: 024601", - out: &MyMessage{ - Count: Int32(024601), - }, - }, - - // Floating point number with "f" suffix - { - in: "count: 4 others:< weight: 17.0f >", - out: &MyMessage{ - Count: Int32(4), - Others: []*OtherMessage{ - { - Weight: Float32(17), - }, - }, - }, - }, - - // Floating point positive infinity - { - in: "count: 4 bigfloat: inf", - out: &MyMessage{ - Count: Int32(4), - Bigfloat: Float64(math.Inf(1)), - }, - }, - - // Floating point negative infinity - { - in: "count: 4 bigfloat: -inf", - out: &MyMessage{ - Count: Int32(4), - Bigfloat: Float64(math.Inf(-1)), - }, - }, - - // Number too large for float32 - { - in: "others:< weight: 12345678901234567890123456789012345678901234567890 >", - err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890", - }, - - // Number posing as a quoted string - { - in: `inner: < host: 12 >` + "\n", - err: `line 1.15: invalid string: 12`, - }, - - // Quoted string posing as int32 - { - in: `count: "12"`, - err: `line 1.7: invalid int32: "12"`, - }, - - // Quoted string posing a float32 - { - in: `others:< weight: "17.4" >`, - err: `line 1.17: invalid float32: "17.4"`, - }, - - // Enum - { - in: `count:42 bikeshed: BLUE`, - out: &MyMessage{ - Count: Int32(42), - Bikeshed: MyMessage_BLUE.Enum(), - }, - }, - - // Repeated field - { - in: `count:42 pet: "horsey" pet:"bunny"`, - out: &MyMessage{ - Count: Int32(42), - Pet: []string{"horsey", "bunny"}, - }, - }, - - // Repeated field with list notation - { - in: `count:42 pet: ["horsey", "bunny"]`, - out: &MyMessage{ - Count: Int32(42), - Pet: []string{"horsey", "bunny"}, - }, - }, - - // Repeated message with/without colon and <>/{} - { - in: `count:42 others:{} others{} others:<> others:{}`, - out: &MyMessage{ - Count: Int32(42), - Others: []*OtherMessage{ - {}, - {}, - {}, - {}, - }, - }, - }, - - // Missing colon for inner message - { - in: `count:42 inner < host: "cauchy.syd" >`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("cauchy.syd"), - }, - }, - }, - - // Missing colon for string field - { - in: `name "Dave"`, - err: `line 1.5: expected ':', found "\"Dave\""`, - }, - - // Missing colon for int32 field - { - in: `count 42`, - err: `line 1.6: expected ':', found "42"`, - }, - - // Missing required field - { - in: `name: "Pawel"`, - err: `proto: required field "testdata.MyMessage.count" not set`, - out: &MyMessage{ - Name: String("Pawel"), - }, - }, - - // Missing required field in a required submessage - { - in: `count: 42 we_must_go_deeper < leo_finally_won_an_oscar <> >`, - err: `proto: required field "testdata.InnerMessage.host" not set`, - out: &MyMessage{ - Count: Int32(42), - WeMustGoDeeper: &RequiredInnerMessage{LeoFinallyWonAnOscar: &InnerMessage{}}, - }, - }, - - // Repeated non-repeated field - { - in: `name: "Rob" name: 
"Russ"`, - err: `line 1.12: non-repeated field "name" was repeated`, - }, - - // Group - { - in: `count: 17 SomeGroup { group_field: 12 }`, - out: &MyMessage{ - Count: Int32(17), - Somegroup: &MyMessage_SomeGroup{ - GroupField: Int32(12), - }, - }, - }, - - // Semicolon between fields - { - in: `count:3;name:"Calvin"`, - out: &MyMessage{ - Count: Int32(3), - Name: String("Calvin"), - }, - }, - // Comma between fields - { - in: `count:4,name:"Ezekiel"`, - out: &MyMessage{ - Count: Int32(4), - Name: String("Ezekiel"), - }, - }, - - // Boolean false - { - in: `count:42 inner { host: "example.com" connected: false }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(false), - }, - }, - }, - // Boolean true - { - in: `count:42 inner { host: "example.com" connected: true }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(true), - }, - }, - }, - // Boolean 0 - { - in: `count:42 inner { host: "example.com" connected: 0 }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(false), - }, - }, - }, - // Boolean 1 - { - in: `count:42 inner { host: "example.com" connected: 1 }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(true), - }, - }, - }, - // Boolean f - { - in: `count:42 inner { host: "example.com" connected: f }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(false), - }, - }, - }, - // Boolean t - { - in: `count:42 inner { host: "example.com" connected: t }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(true), - }, - }, - }, - // Boolean False - { - in: `count:42 inner { host: "example.com" connected: False }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(false), - }, - }, - }, - // Boolean True - { - in: `count:42 inner { host: "example.com" connected: True }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(true), - }, - }, - }, - - // Extension - buildExtStructTest(`count: 42 [testdata.Ext.more]:`), - buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), - buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" 
[testdata.Ext.number]:1729`), - buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`), - - // Big all-in-one - { - in: "count:42 # Meaning\n" + - `name:"Dave" ` + - `quote:"\"I didn't want to go.\"" ` + - `pet:"bunny" ` + - `pet:"kitty" ` + - `pet:"horsey" ` + - `inner:<` + - ` host:"footrest.syd" ` + - ` port:7001 ` + - ` connected:true ` + - `> ` + - `others:<` + - ` key:3735928559 ` + - ` value:"\x01A\a\f" ` + - `> ` + - `others:<` + - " weight:58.9 # Atomic weight of Co\n" + - ` inner:<` + - ` host:"lesha.mtv" ` + - ` port:8002 ` + - ` >` + - `>`, - out: &MyMessage{ - Count: Int32(42), - Name: String("Dave"), - Quote: String(`"I didn't want to go."`), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &InnerMessage{ - Host: String("footrest.syd"), - Port: Int32(7001), - Connected: Bool(true), - }, - Others: []*OtherMessage{ - { - Key: Int64(3735928559), - Value: []byte{0x1, 'A', '\a', '\f'}, - }, - { - Weight: Float32(58.9), - Inner: &InnerMessage{ - Host: String("lesha.mtv"), - Port: Int32(8002), - }, - }, - }, - }, - }, -} - -func TestUnmarshalText(t *testing.T) { - for i, test := range unMarshalTextTests { - pb := new(MyMessage) - err := UnmarshalText(test.in, pb) - if test.err == "" { - // We don't expect failure. - if err != nil { - t.Errorf("Test %d: Unexpected error: %v", i, err) - } else if !reflect.DeepEqual(pb, test.out) { - t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", - i, pb, test.out) - } - } else { - // We do expect failure. - if err == nil { - t.Errorf("Test %d: Didn't get expected error: %v", i, test.err) - } else if err.Error() != test.err { - t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", - i, err.Error(), test.err) - } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) { - t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", - i, pb, test.out) - } - } - } -} - -func TestUnmarshalTextCustomMessage(t *testing.T) { - msg := &textMessage{} - if err := UnmarshalText("custom", msg); err != nil { - t.Errorf("Unexpected error from custom unmarshal: %v", err) - } - if UnmarshalText("not custom", msg) == nil { - t.Errorf("Didn't get expected error from custom unmarshal") - } -} - -// Regression test; this caused a panic. 
-func TestRepeatedEnum(t *testing.T) { - pb := new(RepeatedEnum) - if err := UnmarshalText("color: RED", pb); err != nil { - t.Fatal(err) - } - exp := &RepeatedEnum{ - Color: []RepeatedEnum_Color{RepeatedEnum_RED}, - } - if !Equal(pb, exp) { - t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp) - } -} - -func TestProto3TextParsing(t *testing.T) { - m := new(proto3pb.Message) - const in = `name: "Wallace" true_scotsman: true` - want := &proto3pb.Message{ - Name: "Wallace", - TrueScotsman: true, - } - if err := UnmarshalText(in, m); err != nil { - t.Fatal(err) - } - if !Equal(m, want) { - t.Errorf("\n got %v\nwant %v", m, want) - } -} - -func TestMapParsing(t *testing.T) { - m := new(MessageWithMap) - const in = `name_mapping: name_mapping:` + - `msg_mapping:,>` + // separating commas are okay - `msg_mapping>` + // no colon after "value" - `msg_mapping:>` + // omitted key - `msg_mapping:` + // omitted value - `byte_mapping:` + - `byte_mapping:<>` // omitted key and value - want := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Beatles", - 1234: "Feist", - }, - MsgMapping: map[int64]*FloatingPoint{ - -4: {F: Float64(2.0)}, - -2: {F: Float64(4.0)}, - 0: {F: Float64(5.0)}, - 1: nil, - }, - ByteMapping: map[bool][]byte{ - false: nil, - true: []byte("so be it"), - }, - } - if err := UnmarshalText(in, m); err != nil { - t.Fatal(err) - } - if !Equal(m, want) { - t.Errorf("\n got %v\nwant %v", m, want) - } -} - -func TestOneofParsing(t *testing.T) { - const in = `name:"Shrek"` - m := new(Communique) - want := &Communique{Union: &Communique_Name{Name: "Shrek"}} - if err := UnmarshalText(in, m); err != nil { - t.Fatal(err) - } - if !Equal(m, want) { - t.Errorf("\n got %v\nwant %v", m, want) - } - - const inOverwrite = `name:"Shrek" number:42` - m = new(Communique) - testErr := "line 1.13: field 'number' would overwrite already parsed oneof 'Union'" - if err := UnmarshalText(inOverwrite, m); err == nil { - t.Errorf("TestOneofParsing: Didn't get expected error: %v", testErr) - } else if err.Error() != testErr { - t.Errorf("TestOneofParsing: Incorrect error.\nHave: %v\nWant: %v", - err.Error(), testErr) - } - -} - -var benchInput string - -func init() { - benchInput = "count: 4\n" - for i := 0; i < 1000; i++ { - benchInput += "pet: \"fido\"\n" - } - - // Check it is valid input. - pb := new(MyMessage) - err := UnmarshalText(benchInput, pb) - if err != nil { - panic("Bad benchmark input: " + err.Error()) - } -} - -func BenchmarkUnmarshalText(b *testing.B) { - pb := new(MyMessage) - for i := 0; i < b.N; i++ { - UnmarshalText(benchInput, pb) - } - b.SetBytes(int64(len(benchInput))) -} diff --git a/vendor/github.com/gogo/protobuf/proto/text_test.go b/vendor/github.com/gogo/protobuf/proto/text_test.go deleted file mode 100644 index 27df6cb9..00000000 --- a/vendor/github.com/gogo/protobuf/proto/text_test.go +++ /dev/null @@ -1,474 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "bytes" - "errors" - "io/ioutil" - "math" - "strings" - "testing" - - "github.com/gogo/protobuf/proto" - - proto3pb "github.com/gogo/protobuf/proto/proto3_proto" - pb "github.com/gogo/protobuf/proto/testdata" -) - -// textMessage implements the methods that allow it to marshal and unmarshal -// itself as text. -type textMessage struct { -} - -func (*textMessage) MarshalText() ([]byte, error) { - return []byte("custom"), nil -} - -func (*textMessage) UnmarshalText(bytes []byte) error { - if string(bytes) != "custom" { - return errors.New("expected 'custom'") - } - return nil -} - -func (*textMessage) Reset() {} -func (*textMessage) String() string { return "" } -func (*textMessage) ProtoMessage() {} - -func newTestMessage() *pb.MyMessage { - msg := &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - Quote: proto.String(`"I didn't want to go."`), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &pb.InnerMessage{ - Host: proto.String("footrest.syd"), - Port: proto.Int32(7001), - Connected: proto.Bool(true), - }, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(0xdeadbeef), - Value: []byte{1, 65, 7, 12}, - }, - { - Weight: proto.Float32(6.022), - Inner: &pb.InnerMessage{ - Host: proto.String("lesha.mtv"), - Port: proto.Int32(8002), - }, - }, - }, - Bikeshed: pb.MyMessage_BLUE.Enum(), - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(8), - }, - // One normally wouldn't do this. - // This is an undeclared tag 13, as a varint (wire type 0) with value 4. - XXX_unrecognized: []byte{13<<3 | 0, 4}, - } - ext := &pb.Ext{ - Data: proto.String("Big gobs for big rats"), - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil { - panic(err) - } - greetings := []string{"adg", "easy", "cow"} - if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil { - panic(err) - } - - // Add an unknown extension. We marshal a pb.Ext, and fake the ID. - b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")}) - if err != nil { - panic(err) - } - b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...) - proto.SetRawExtension(msg, 201, b) - - // Extensions can be plain fields, too, so let's test that. 
- b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19) - proto.SetRawExtension(msg, 202, b) - - return msg -} - -const text = `count: 42 -name: "Dave" -quote: "\"I didn't want to go.\"" -pet: "bunny" -pet: "kitty" -pet: "horsey" -inner: < - host: "footrest.syd" - port: 7001 - connected: true -> -others: < - key: 3735928559 - value: "\001A\007\014" -> -others: < - weight: 6.022 - inner: < - host: "lesha.mtv" - port: 8002 - > -> -bikeshed: BLUE -SomeGroup { - group_field: 8 -} -/* 2 unknown bytes */ -13: 4 -[testdata.Ext.more]: < - data: "Big gobs for big rats" -> -[testdata.greeting]: "adg" -[testdata.greeting]: "easy" -[testdata.greeting]: "cow" -/* 13 unknown bytes */ -201: "\t3G skiing" -/* 3 unknown bytes */ -202: 19 -` - -func TestMarshalText(t *testing.T) { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, newTestMessage()); err != nil { - t.Fatalf("proto.MarshalText: %v", err) - } - s := buf.String() - if s != text { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text) - } -} - -func TestMarshalTextCustomMessage(t *testing.T) { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, &textMessage{}); err != nil { - t.Fatalf("proto.MarshalText: %v", err) - } - s := buf.String() - if s != "custom" { - t.Errorf("Got %q, expected %q", s, "custom") - } -} -func TestMarshalTextNil(t *testing.T) { - want := "" - tests := []proto.Message{nil, (*pb.MyMessage)(nil)} - for i, test := range tests { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, test); err != nil { - t.Fatal(err) - } - if got := buf.String(); got != want { - t.Errorf("%d: got %q want %q", i, got, want) - } - } -} - -func TestMarshalTextUnknownEnum(t *testing.T) { - // The Color enum only specifies values 0-2. - m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()} - got := m.String() - const want = `bikeshed:3 ` - if got != want { - t.Errorf("\n got %q\nwant %q", got, want) - } -} - -func TestTextOneof(t *testing.T) { - tests := []struct { - m proto.Message - want string - }{ - // zero message - {&pb.Communique{}, ``}, - // scalar field - {&pb.Communique{Union: &pb.Communique_Number{Number: 4}}, `number:4`}, - // message field - {&pb.Communique{Union: &pb.Communique_Msg{ - Msg: &pb.Strings{StringField: proto.String("why hello!")}, - }}, `msg:`}, - // bad oneof (should not panic) - {&pb.Communique{Union: &pb.Communique_Msg{Msg: nil}}, `msg:/* nil */`}, - } - for _, test := range tests { - got := strings.TrimSpace(test.m.String()) - if got != test.want { - t.Errorf("\n got %s\nwant %s", got, test.want) - } - } -} - -func BenchmarkMarshalTextBuffered(b *testing.B) { - buf := new(bytes.Buffer) - m := newTestMessage() - for i := 0; i < b.N; i++ { - buf.Reset() - proto.MarshalText(buf, m) - } -} - -func BenchmarkMarshalTextUnbuffered(b *testing.B) { - w := ioutil.Discard - m := newTestMessage() - for i := 0; i < b.N; i++ { - proto.MarshalText(w, m) - } -} - -func compact(src string) string { - // s/[ \n]+/ /g; s/ $//; - dst := make([]byte, len(src)) - space, comment := false, false - j := 0 - for i := 0; i < len(src); i++ { - if strings.HasPrefix(src[i:], "/*") { - comment = true - i++ - continue - } - if comment && strings.HasPrefix(src[i:], "*/") { - comment = false - i++ - continue - } - if comment { - continue - } - c := src[i] - if c == ' ' || c == '\n' { - space = true - continue - } - if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') { - space = false - } - if c == '{' { - space = false - } - if space { - dst[j] = ' ' - j++ - space = false - } - dst[j] = c 
- j++ - } - if space { - dst[j] = ' ' - j++ - } - return string(dst[0:j]) -} - -var compactText = compact(text) - -func TestCompactText(t *testing.T) { - s := proto.CompactTextString(newTestMessage()) - if s != compactText { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText) - } -} - -func TestStringEscaping(t *testing.T) { - testCases := []struct { - in *pb.Strings - out string - }{ - { - // Test data from C++ test (TextFormatTest.StringEscape). - // Single divergence: we don't escape apostrophes. - &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")}, - "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n", - }, - { - // Test data from the same C++ test. - &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")}, - "string_field: \"\\350\\260\\267\\346\\255\\214\"\n", - }, - { - // Some UTF-8. - &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")}, - `string_field: "\000\001\377\201"` + "\n", - }, - } - - for i, tc := range testCases { - var buf bytes.Buffer - if err := proto.MarshalText(&buf, tc.in); err != nil { - t.Errorf("proto.MarsalText: %v", err) - continue - } - s := buf.String() - if s != tc.out { - t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out) - continue - } - - // Check round-trip. - pbStrings := new(pb.Strings) - if err := proto.UnmarshalText(s, pbStrings); err != nil { - t.Errorf("#%d: UnmarshalText: %v", i, err) - continue - } - if !proto.Equal(pbStrings, tc.in) { - t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pbStrings) - } - } -} - -// A limitedWriter accepts some output before it fails. -// This is a proxy for something like a nearly-full or imminently-failing disk, -// or a network connection that is about to die. -type limitedWriter struct { - b bytes.Buffer - limit int -} - -var outOfSpace = errors.New("proto: insufficient space") - -func (w *limitedWriter) Write(p []byte) (n int, err error) { - var avail = w.limit - w.b.Len() - if avail <= 0 { - return 0, outOfSpace - } - if len(p) <= avail { - return w.b.Write(p) - } - n, _ = w.b.Write(p[:avail]) - return n, outOfSpace -} - -func TestMarshalTextFailing(t *testing.T) { - // Try lots of different sizes to exercise more error code-paths. - for lim := 0; lim < len(text); lim++ { - buf := new(limitedWriter) - buf.limit = lim - err := proto.MarshalText(buf, newTestMessage()) - // We expect a certain error, but also some partial results in the buffer. 
- if err != outOfSpace { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace) - } - s := buf.b.String() - x := text[:buf.limit] - if s != x { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x) - } - } -} - -func TestFloats(t *testing.T) { - tests := []struct { - f float64 - want string - }{ - {0, "0"}, - {4.7, "4.7"}, - {math.Inf(1), "inf"}, - {math.Inf(-1), "-inf"}, - {math.NaN(), "nan"}, - } - for _, test := range tests { - msg := &pb.FloatingPoint{F: &test.f} - got := strings.TrimSpace(msg.String()) - want := `f:` + test.want - if got != want { - t.Errorf("f=%f: got %q, want %q", test.f, got, want) - } - } -} - -func TestRepeatedNilText(t *testing.T) { - m := &pb.MessageList{ - Message: []*pb.MessageList_Message{ - nil, - { - Name: proto.String("Horse"), - }, - nil, - }, - } - want := `Message -Message { - name: "Horse" -} -Message -` - if s := proto.MarshalTextString(m); s != want { - t.Errorf(" got: %s\nwant: %s", s, want) - } -} - -func TestProto3Text(t *testing.T) { - tests := []struct { - m proto.Message - want string - }{ - // zero message - {&proto3pb.Message{}, ``}, - // zero message except for an empty byte slice - {&proto3pb.Message{Data: []byte{}}, ``}, - // trivial case - {&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`}, - // empty map - {&pb.MessageWithMap{}, ``}, - // non-empty map; map format is the same as a repeated struct, - // and they are sorted by key (numerically for numeric keys). - { - &pb.MessageWithMap{NameMapping: map[int32]string{ - -1: "Negatory", - 7: "Lucky", - 1234: "Feist", - 6345789: "Otis", - }}, - `name_mapping: ` + - `name_mapping: ` + - `name_mapping: ` + - `name_mapping:`, - }, - // map with nil value; not well-defined, but we shouldn't crash - { - &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}}, - `msg_mapping:`, - }, - } - for _, test := range tests { - got := strings.TrimSpace(test.m.String()) - if got != test.want { - t.Errorf("\n got %s\nwant %s", got, test.want) - } - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go index d4276474..38439fa9 100644 --- a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go @@ -47,183 +47,3 @@ func (*timestamp) String() string { return "timestamp" } func init() { RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") } - -func (o *Buffer) decTimestamp() (time.Time, error) { - b, err := o.DecodeRawBytes(true) - if err != nil { - return time.Time{}, err - } - tproto := ×tamp{} - if err := Unmarshal(b, tproto); err != nil { - return time.Time{}, err - } - return timestampFromProto(tproto) -} - -func (o *Buffer) dec_time(p *Properties, base structPointer) error { - t, err := o.decTimestamp() - if err != nil { - return err - } - setPtrCustomType(base, p.field, &t) - return nil -} - -func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error { - t, err := o.decTimestamp() - if err != nil { - return err - } - setCustomType(base, p.field, &t) - return nil -} - -func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error { - t, err := o.decTimestamp() - if err != nil { - return err - } - newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))) - var zero field - setPtrCustomType(newBas, zero, &t) - return nil -} - -func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error { - t, err := o.decTimestamp() - if err 
!= nil { - return err - } - newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType)) - var zero field - setCustomType(newBas, zero, &t) - return nil -} - -func size_time(p *Properties, base structPointer) (n int) { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - tim := structPointer_Interface(structp, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return 0 - } - size := Size(t) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_time(p *Properties, base structPointer) error { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - tim := structPointer_Interface(structp, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return err - } - data, err := Marshal(t) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_ref_time(p *Properties, base structPointer) (n int) { - tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return 0 - } - size := Size(t) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error { - tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return err - } - data, err := Marshal(t) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_slice_time(p *Properties, base structPointer) (n int) { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - if tims[i] == nil { - return 0 - } - tproto, err := timestampProto(*tims[i]) - if err != nil { - return 0 - } - size := Size(tproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - if tims[i] == nil { - return errRepeatedHasNil - } - tproto, err := timestampProto(*tims[i]) - if err != nil { - return err - } - data, err := Marshal(tproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} - -func size_slice_ref_time(p *Properties, base structPointer) (n int) { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - tproto, err := timestampProto(tims[i]) - if err != nil { - return 0 - } - size := Size(tproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - tproto, err := timestampProto(tims[i]) - if err != nil { - return err - } - data, err := Marshal(tproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(data) - } - return nil -} diff --git a/vendor/github.com/google/go-containerregistry/.bazelrc b/vendor/github.com/google/go-containerregistry/.bazelrc deleted file mode 100644 index 397ecf81..00000000 --- a/vendor/github.com/google/go-containerregistry/.bazelrc +++ /dev/null @@ -1,17 +0,0 @@ -build --deleted_packages=\ -vendor/k8s.io/code-generator/cmd/deepcopy-gen,\ -vendor/k8s.io/code-generator/cmd/deepcopy-gen/args,\ -vendor/k8s.io/code-generator/pkg/util,\ -vendor/k8s.io/gengo/args,\ -vendor/k8s.io/gengo/examples/deepcopy-gen/generators,\ -vendor/k8s.io/gengo/examples/deepcopy-gen/generators/args,\ -vendor/github.com/spf13/cobra/cobra/cmd - -test --test_output=errors \ ---deleted_packages=\ -vendor/k8s.io/code-generator/cmd/deepcopy-gen,\ -vendor/k8s.io/code-generator/cmd/deepcopy-gen/args,\ -vendor/k8s.io/code-generator/pkg/util,\ -vendor/k8s.io/gengo/args,\ -vendor/k8s.io/gengo/examples/deepcopy-gen/generators,\ -vendor/github.com/spf13/cobra/cobra/cmd diff --git a/vendor/github.com/google/go-containerregistry/.gitattributes b/vendor/github.com/google/go-containerregistry/.gitattributes deleted file mode 100644 index e42b11ce..00000000 --- a/vendor/github.com/google/go-containerregistry/.gitattributes +++ /dev/null @@ -1,6 +0,0 @@ -# This file is documented at https://git-scm.com/docs/gitattributes. -# Linguist-specific attributes are documented at -# https://github.com/github/linguist. - -**/zz_deepcopy_generated.go linguist-generated=true -cmd/crane/doc/crane*.md linguist-generated=true diff --git a/vendor/github.com/google/go-containerregistry/.gitignore b/vendor/github.com/google/go-containerregistry/.gitignore deleted file mode 100644 index 5264ecc8..00000000 --- a/vendor/github.com/google/go-containerregistry/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -**/*~ -.project -bazel* -.idea -*.iml \ No newline at end of file diff --git a/vendor/github.com/google/go-containerregistry/.travis.yml b/vendor/github.com/google/go-containerregistry/.travis.yml deleted file mode 100644 index 1385f791..00000000 --- a/vendor/github.com/google/go-containerregistry/.travis.yml +++ /dev/null @@ -1,38 +0,0 @@ -sudo: required - -dist: trusty - -language: - - go - -go: - - "1.10" - - tip - -before_install: - # Bazel requires JDK8 - - sudo apt-get install oracle-java8-installer - -services: - - docker - -addons: - apt: - sources: - - sourceline: 'deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8' - key_url: 'https://storage.googleapis.com/bazel-apt/doc/apt-key.pub.gpg' - packages: - - bazel - -script: - - bazel clean && bazel build //... - - bazel clean && bazel test --test_output=errors //... - - # Verify that all source files are correctly formatted. - - find . -name "*.go" | grep -v vendor/ | xargs gofmt -d -e -l - - - go clean -i - - go build -a -v -race ./... - - # Verify that generated crane docs are up-to-date. 
- - mkdir -p /tmp/gendoc && go run cmd/crane/help/main.go --dir /tmp/gendoc && diff -Naur /tmp/gendoc/ cmd/crane/doc/ diff --git a/vendor/github.com/google/go-containerregistry/BUILD.bazel b/vendor/github.com/google/go-containerregistry/BUILD.bazel deleted file mode 100644 index bb5c106c..00000000 --- a/vendor/github.com/google/go-containerregistry/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@bazel_gazelle//:def.bzl", "gazelle") - -licenses(["notice"]) # Apache 2.0 - -exports_files(["LICENSE"]) - -gazelle( - name = "gazelle", - command = "fix", - external = "vendored", - extra_args = [ - "-build_file_name", - "BUILD.bazel,BUILD", # Prioritize `BUILD.bazel` for newly added files. - ], - prefix = "github.com/google/go-containerregistry", -) diff --git a/vendor/github.com/google/go-containerregistry/CONTRIBUTING.md b/vendor/github.com/google/go-containerregistry/CONTRIBUTING.md deleted file mode 100644 index a3b1624b..00000000 --- a/vendor/github.com/google/go-containerregistry/CONTRIBUTING.md +++ /dev/null @@ -1,23 +0,0 @@ -# How to Contribute to go-containerregistry - -We'd love to accept your patches and contributions to this project. There are -just a few small guidelines you need to follow. - -## Contributor License Agreement - -Contributions to this project must be accompanied by a Contributor License -Agreement. You (or your employer) retain the copyright to your contribution; -this simply gives us permission to use and redistribute your contributions as -part of the project. Head over to <https://cla.developers.google.com/> to see -your current agreements on file or to sign a new one. - -You generally only need to submit a CLA once, so if you've already submitted one -(even if it was for a different project), you probably don't need to do it -again. - -## Code reviews - -All submissions, including submissions by project members, require review. We -use GitHub pull requests for this purpose. Consult -[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more -information on using pull requests. diff --git a/vendor/github.com/google/go-containerregistry/Gopkg.lock b/vendor/github.com/google/go-containerregistry/Gopkg.lock deleted file mode 100644 index 08055b3d..00000000 --- a/vendor/github.com/google/go-containerregistry/Gopkg.lock +++ /dev/null @@ -1,266 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
- - -[[projects]] - name = "github.com/Microsoft/go-winio" - packages = ["."] - revision = "7da180ee92d8bd8bb8c37fc560e673e6557c392f" - version = "v0.4.7" - -[[projects]] - name = "github.com/cpuguy83/go-md2man" - packages = ["md2man"] - revision = "20f5889cbdc3c73dbd2862796665e7c465ade7d1" - version = "v1.0.8" - -[[projects]] - name = "github.com/docker/distribution" - packages = [ - "digest", - "reference" - ] - revision = "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" - version = "v2.6.2" - -[[projects]] - name = "github.com/docker/docker" - packages = [ - "api/types", - "api/types/blkiodev", - "api/types/container", - "api/types/events", - "api/types/filters", - "api/types/mount", - "api/types/network", - "api/types/reference", - "api/types/registry", - "api/types/strslice", - "api/types/swarm", - "api/types/time", - "api/types/versions", - "api/types/volume", - "client", - "pkg/tlsconfig" - ] - revision = "092cba3727bb9b4a2f0e922cd6c0f93ea270e363" - version = "v1.13.1" - -[[projects]] - name = "github.com/docker/go-connections" - packages = [ - "nat", - "sockets", - "tlsconfig" - ] - revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d" - version = "v0.3.0" - -[[projects]] - name = "github.com/docker/go-units" - packages = ["."] - revision = "47565b4f722fb6ceae66b95f853feed578a4a51c" - version = "v0.3.3" - -[[projects]] - name = "github.com/fsnotify/fsnotify" - packages = ["."] - revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" - version = "v1.4.7" - -[[projects]] - branch = "master" - name = "github.com/golang/glog" - packages = ["."] - revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" - -[[projects]] - name = "github.com/google/go-cmp" - packages = [ - "cmp", - "cmp/cmpopts", - "cmp/internal/diff", - "cmp/internal/function", - "cmp/internal/value" - ] - revision = "3af367b6b30c263d47e8895973edcca9a49cf029" - version = "v0.2.0" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/hcl" - packages = [ - ".", - "hcl/ast", - "hcl/parser", - "hcl/printer", - "hcl/scanner", - "hcl/strconv", - "hcl/token", - "json/parser", - "json/scanner", - "json/token" - ] - revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168" - -[[projects]] - name = "github.com/inconshreveable/mousetrap" - packages = ["."] - revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" - version = "v1.0" - -[[projects]] - name = "github.com/magiconair/properties" - packages = ["."] - revision = "c2353362d570a7bfa228149c62842019201cfb71" - version = "v1.8.0" - -[[projects]] - branch = "master" - name = "github.com/mitchellh/mapstructure" - packages = ["."] - revision = "bb74f1db0675b241733089d5a1faa5dd8b0ef57b" - -[[projects]] - name = "github.com/pelletier/go-toml" - packages = ["."] - revision = "acdc4509485b587f5e675510c4f2c63e90ff68a8" - version = "v1.1.0" - -[[projects]] - name = "github.com/pkg/errors" - packages = ["."] - revision = "645ef00459ed84a119197bfb8d8205042c6df63d" - version = "v0.8.0" - -[[projects]] - name = "github.com/russross/blackfriday" - packages = ["."] - revision = "55d61fa8aa702f59229e6cff85793c22e580eaf5" - version = "v1.5.1" - -[[projects]] - name = "github.com/spf13/afero" - packages = [ - ".", - "mem" - ] - revision = "63644898a8da0bc22138abf860edaf5277b6102e" - version = "v1.1.0" - -[[projects]] - name = "github.com/spf13/cast" - packages = ["."] - revision = "8965335b8c7107321228e3e3702cab9832751bac" - version = "v1.2.0" - -[[projects]] - name = "github.com/spf13/cobra" - packages = [ - ".", - "doc" - ] - revision = "615425954c3b0d9485a7027d4d451fdcdfdee84e" - -[[projects]] 
- branch = "master" - name = "github.com/spf13/jwalterweatherman" - packages = ["."] - revision = "7c0cea34c8ece3fbeb2b27ab9b59511d360fb394" - -[[projects]] - name = "github.com/spf13/pflag" - packages = ["."] - revision = "583c0c0531f06d5278b7d917446061adc344b5cd" - version = "v1.0.1" - -[[projects]] - name = "github.com/spf13/viper" - packages = ["."] - revision = "b5e8006cbee93ec955a89ab31e0e3ce3204f3736" - version = "v1.0.2" - -[[projects]] - branch = "master" - name = "golang.org/x/net" - packages = [ - "context", - "context/ctxhttp", - "internal/socks", - "proxy" - ] - revision = "1e491301e022f8f977054da4c2d852decd59571f" - -[[projects]] - branch = "master" - name = "golang.org/x/sync" - packages = ["errgroup"] - revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" - -[[projects]] - branch = "master" - name = "golang.org/x/sys" - packages = [ - "unix", - "windows" - ] - revision = "c11f84a56e43e20a78cee75a7c034031ecf57d1f" - -[[projects]] - name = "golang.org/x/text" - packages = [ - "internal/gen", - "internal/triegen", - "internal/ucd", - "transform", - "unicode/cldr", - "unicode/norm" - ] - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - -[[projects]] - branch = "master" - name = "golang.org/x/tools" - packages = [ - "go/ast/astutil", - "imports", - "internal/fastwalk" - ] - revision = "a5b4c53f6e8bdcafa95a94671bf2d1203365858b" - -[[projects]] - name = "gopkg.in/yaml.v2" - packages = ["."] - revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" - version = "v2.2.1" - -[[projects]] - name = "k8s.io/code-generator" - packages = [ - "cmd/deepcopy-gen", - "cmd/deepcopy-gen/args", - "pkg/util" - ] - revision = "7ead8f38b01cf8653249f5af80ce7b2c8aba12e2" - version = "kubernetes-1.10.0" - -[[projects]] - branch = "master" - name = "k8s.io/gengo" - packages = [ - "args", - "examples/deepcopy-gen/generators", - "examples/set-gen/sets", - "generator", - "namer", - "parser", - "types" - ] - revision = "2e1a79edcaecf0bfbde129a1fd55624b66adb699" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "d70a4e8d00df5092c29e042845e6ba476b253a9738e2f4c2d162c0fd17de5c1f" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/google/go-containerregistry/Gopkg.toml b/vendor/github.com/google/go-containerregistry/Gopkg.toml deleted file mode 100644 index 322f3528..00000000 --- a/vendor/github.com/google/go-containerregistry/Gopkg.toml +++ /dev/null @@ -1,45 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# - -# Used for deep copy code generation -required = [ - "k8s.io/code-generator/cmd/deepcopy-gen", -] - -[[constraint]] - name = "k8s.io/code-generator" - version = "kubernetes-1.10.0" - -[[constraint]] - name = "github.com/google/go-cmp" - version = "0.2.0" - -# Use HEAD (2018-04-21) to pick up: -# https://github.com/spf13/cobra/pull/662 -[[constraint]] - name = "github.com/spf13/cobra" - revision = "615425954c3b0d9485a7027d4d451fdcdfdee84e" - -[prune] - go-tests = true - unused-packages = true - non-go = true diff --git a/vendor/github.com/google/go-containerregistry/README.md b/vendor/github.com/google/go-containerregistry/README.md deleted file mode 100644 index 5d19be3a..00000000 --- a/vendor/github.com/google/go-containerregistry/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# go-containerregistry - -[![Build Status](https://travis-ci.org/google/go-containerregistry.svg?branch=master)](https://travis-ci.org/google/go-containerregistry) -[![GoDoc](https://godoc.org/github.com/google/go-containerregistry?status.svg)](https://godoc.org/github.com/google/go-containerregistry) -[![Go Report Card](https://goreportcard.com/badge/google/go-containerregistry)](https://goreportcard.com/report/google/go-containerregistry) - -## Introduction - -This is a golang library for working with container registries. It's largely based on the [Python library of the same name](https://github.com/google/containerregistry), but more hip and uses GitHub as the source of truth. - -## Tools - -This repo hosts two tools built on top of the library. - -### crane - -[`crane`](cmd/crane/doc/crane.md) is a tool for interacting with remote images and registries. - -### ko - -[`ko`](cmd/ko/README.md) is a tool for building and deploying golang applications to kubernetes. 
diff --git a/vendor/github.com/google/go-containerregistry/WORKSPACE b/vendor/github.com/google/go-containerregistry/WORKSPACE deleted file mode 100644 index 705e9f54..00000000 --- a/vendor/github.com/google/go-containerregistry/WORKSPACE +++ /dev/null @@ -1,41 +0,0 @@ -http_archive( - name = "io_bazel_rules_go", - sha256 = "4b14d8dd31c6dbaf3ff871adcd03f28c3274e42abc855cb8fb4d01233c0154dc", - url = "https://github.com/bazelbuild/rules_go/releases/download/0.10.1/rules_go-0.10.1.tar.gz", -) - -http_archive( - name = "bazel_gazelle", - sha256 = "6228d9618ab9536892aa69082c063207c91e777e51bd3c5544c9c060cafe1bd8", - url = "https://github.com/bazelbuild/bazel-gazelle/releases/download/0.10.0/bazel-gazelle-0.10.0.tar.gz", -) - -load("@io_bazel_rules_go//go:def.bzl", "go_rules_dependencies", "go_register_toolchains") - -go_rules_dependencies() - -go_register_toolchains() - -load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies") - -gazelle_dependencies() - -git_repository( - name = "io_bazel_rules_docker", - commit = "4d8ec6570a5313fb0128e2354f2bc4323685282a", - remote = "https://github.com/bazelbuild/rules_docker.git", -) - -load( - "@io_bazel_rules_docker//container:container.bzl", - container_repositories = "repositories", -) - -container_repositories() - -load( - "@io_bazel_rules_docker//go:image.bzl", - _go_image_repos = "repositories", -) - -_go_image_repos() diff --git a/vendor/github.com/google/go-containerregistry/cloudbuild.yaml b/vendor/github.com/google/go-containerregistry/cloudbuild.yaml deleted file mode 100644 index 78f30333..00000000 --- a/vendor/github.com/google/go-containerregistry/cloudbuild.yaml +++ /dev/null @@ -1,20 +0,0 @@ -steps: -# Build the ko binary. -- name: gcr.io/cloud-builders/go:debian - env: ['PROJECT_ROOT=github.com/google/go-containerregistry'] - args: ['install', 'github.com/google/go-containerregistry/cmd/ko'] - -# Use the ko binary to build the crane builder image. -- name: gcr.io/cloud-builders/go:debian - env: ['GOPATH=/workspace/gopath', 'KO_DOCKER_REPO=gcr.io/$PROJECT_ID'] - entrypoint: /workspace/gopath/bin/ko - dir: gopath/src - args: ['publish', 'github.com/google/go-containerregistry/cmd/crane'] - -# Use the crane builder to retag the crane builder. -- name: gcr.io/$PROJECT_ID/github.com/google/go-containerregistry/cmd/crane - args: ['copy', 'gcr.io/$PROJECT_ID/github.com/google/go-containerregistry/cmd/crane', 'gcr.io/$PROJECT_ID/crane'] - -# Use the crane builder to get the crane builder's digest. 
-- name: gcr.io/$PROJECT_ID/crane - args: ['digest', 'gcr.io/$PROJECT_ID/crane'] diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/authn/BUILD.bazel deleted file mode 100644 index 9d1680b0..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/BUILD.bazel +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "anon.go", - "auth.go", - "authn.go", - "basic.go", - "bearer.go", - "doc.go", - "helper.go", - "keychain.go", - ], - importpath = "github.com/google/go-containerregistry/pkg/authn", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/google/go-containerregistry/pkg/name:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = [ - "anon_test.go", - "basic_test.go", - "bearer_test.go", - "helper_test.go", - "keychain_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/google/go-containerregistry/pkg/authn", - deps = ["//vendor/github.com/google/go-containerregistry/pkg/name:go_default_library"], -) diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/anon_test.go b/vendor/github.com/google/go-containerregistry/pkg/authn/anon_test.go deleted file mode 100644 index fd015c59..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/anon_test.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package authn - -import ( - "testing" -) - -func TestAnonymous(t *testing.T) { - hdr, err := Anonymous.Authorization() - if err != nil { - t.Errorf("Authorization() = %v", err) - } - if hdr != "" { - t.Errorf("Authorization(); got %v, wanted empty string", hdr) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/basic_test.go b/vendor/github.com/google/go-containerregistry/pkg/authn/basic_test.go deleted file mode 100644 index 96774608..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/basic_test.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package authn - -import ( - "testing" -) - -func TestBasic(t *testing.T) { - anon := &Basic{Username: "foo", Password: "bar"} - - got, err := anon.Authorization() - if err != nil { - t.Errorf("Authorization() = %v", err) - } - want := "Basic Zm9vOmJhcg==" - if got != want { - t.Errorf("Authorization(); got %v, want %v", got, want) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/bearer_test.go b/vendor/github.com/google/go-containerregistry/pkg/authn/bearer_test.go deleted file mode 100644 index b331b9c2..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/bearer_test.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package authn - -import ( - "testing" -) - -func TestBearer(t *testing.T) { - anon := &Bearer{Token: "bazinga"} - - got, err := anon.Authorization() - if err != nil { - t.Errorf("Authorization() = %v", err) - } - want := "Bearer bazinga" - if got != want { - t.Errorf("Authorization(); got %v, want %v", got, want) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/helper_test.go b/vendor/github.com/google/go-containerregistry/pkg/authn/helper_test.go deleted file mode 100644 index 7e5156bf..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/helper_test.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package authn - -import ( - "errors" - "os/exec" - "testing" - - "github.com/google/go-containerregistry/pkg/name" -) - -var ( - testDomain, _ = name.NewRegistry("foo.dev", name.WeakValidation) -) - -// errorRunner implements runner to always return an execution error. -type errorRunner struct { - err error -} - -// Run implements runner -func (er *errorRunner) Run(*exec.Cmd) error { - return er.err -} - -// printRunner implements runner to write a fixed message to stdout. -type printRunner struct { - msg string -} - -// Run implements runner -func (pr *printRunner) Run(cmd *exec.Cmd) error { - _, err := cmd.Stdout.Write([]byte(pr.msg)) - return err -} - -// errorPrintRunner implements runner to write a fixed message to stdout -// and exit with an error code. 
-type errorPrintRunner struct { - msg string -} - -// Run implements runner -func (pr *errorPrintRunner) Run(cmd *exec.Cmd) error { - _, err := cmd.Stdout.Write([]byte(pr.msg)) - if err != nil { - return err - } - - return &exec.ExitError{} -} - -func TestHelperError(t *testing.T) { - want := errors.New("fdhskjdfhkjhsf") - h := &helper{name: "test", domain: testDomain, r: &errorRunner{err: want}} - - if _, got := h.Authorization(); got != want { - t.Errorf("Authorization(); got %v, want %v", got, want) - } -} - -func TestMagicString(t *testing.T) { - h := &helper{name: "test", domain: testDomain, r: &errorPrintRunner{msg: magicNotFoundMessage}} - - got, err := h.Authorization() - if err != nil { - t.Errorf("Authorization() = %v", err) - } - - // When we get the magic not found message we should fall back on anonymous authentication. - want, _ := Anonymous.Authorization() - if got != want { - t.Errorf("Authorization(); got %v, want %v", got, want) - } -} - -func TestGoodOutput(t *testing.T) { - output := `{"Username": "foo", "Secret": "bar"}` - h := &helper{name: "test", domain: testDomain, r: &printRunner{msg: output}} - - got, err := h.Authorization() - if err != nil { - t.Errorf("Authorization() = %v", err) - } - - // When we get the magic not found message we should fall back on anonymous authentication. - want := "Basic Zm9vOmJhcg==" - if got != want { - t.Errorf("Authorization(); got %v, want %v", got, want) - } -} - -func TestBadOutput(t *testing.T) { - // That extra comma will get ya every time. - output := `{"Username": "foo", "Secret": "bar",}` - h := &helper{name: "test", domain: testDomain, r: &printRunner{msg: output}} - - got, err := h.Authorization() - if err == nil { - t.Errorf("Authorization() = %v", got) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/keychain_test.go b/vendor/github.com/google/go-containerregistry/pkg/authn/keychain_test.go deleted file mode 100644 index b9977dcc..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/keychain_test.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package authn - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "runtime" - "testing" - - "github.com/google/go-containerregistry/pkg/name" -) - -func TestConfigDir(t *testing.T) { - clearEnv := func() { - for _, e := range []string{"HOME", "DOCKER_CONFIG", "HOMEDRIVE", "HOMEPATH"} { - os.Unsetenv(e) - } - } - - for _, c := range []struct { - desc string - env map[string]string - want string - wantErr bool - skipOnNonWindows bool - }{{ - desc: "no env set", - env: map[string]string{}, - wantErr: true, - }, { - desc: "DOCKER_CONFIG", - env: map[string]string{"DOCKER_CONFIG": "/path/to/.docker"}, - want: "/path/to/.docker", - }, { - desc: "HOME", - env: map[string]string{"HOME": "/my/home"}, - want: "/my/home/.docker", - }, { - desc: "USERPROFILE", - skipOnNonWindows: true, - env: map[string]string{"USERPROFILE": "/user/profile"}, - want: "/user/profile/.docker", - }} { - t.Run(c.desc, func(t *testing.T) { - if c.skipOnNonWindows && runtime.GOOS != "windows" { - t.Skip("Skipping on non-Windows") - } - clearEnv() - for k, v := range c.env { - os.Setenv(k, v) - } - got, err := configDir() - if err == nil && c.wantErr { - t.Errorf("configDir() returned no error, got %q", got) - } else if err != nil && !c.wantErr { - t.Errorf("configDir(): %v", err) - } - - if got != c.want { - t.Errorf("configDir(); got %q, want %q", got, c.want) - } - }) - } -} - -var ( - fresh = 0 - testRegistry, _ = name.NewRegistry("test.io", name.WeakValidation) -) - -// setupConfigDir sets up an isolated configDir() for this test. -func setupConfigDir(t *testing.T) string { - tmpdir := os.Getenv("TEST_TMPDIR") - if tmpdir == "" { - var err error - tmpdir, err = ioutil.TempDir("", "keychain_test") - if err != nil { - t.Fatalf("creating temp dir: %v", err) - } - } - - fresh = fresh + 1 - p := fmt.Sprintf("%s/%d", tmpdir, fresh) - os.Setenv("DOCKER_CONFIG", p) - if err := os.Mkdir(p, 0777); err != nil { - t.Fatalf("mkdir %q: %v", p, err) - } - return p -} - -func setupConfigFile(t *testing.T, content string) { - p := path.Join(setupConfigDir(t), "config.json") - if err := ioutil.WriteFile(p, []byte(content), 0600); err != nil { - t.Fatalf("write %q: %v", p, err) - } -} - -func checkOutput(t *testing.T, want string) { - auth, err := DefaultKeychain.Resolve(testRegistry) - if err != nil { - t.Errorf("Resolve() = %v", err) - } - - got, err := auth.Authorization() - if err != nil { - t.Errorf("Authorization() = %v", err) - } - if got != want { - t.Errorf("Authorization(); got %v, want %v", got, want) - } -} - -func checkAnonymousFallback(t *testing.T) { - checkOutput(t, "") -} - -func checkFooBarOutput(t *testing.T) { - // base64(foo:bar) - checkOutput(t, "Basic Zm9vOmJhcg==") -} - -func checkHelper(t *testing.T) { - auth, err := DefaultKeychain.Resolve(testRegistry) - if err != nil { - t.Errorf("Resolve() = %v", err) - } - - help, ok := auth.(*helper) - if !ok { - t.Errorf("Resolve(); got %T, want *helper", auth) - } - if help.name != "test" { - t.Errorf("Resolve().name; got %v, want \"test\"", help.name) - } - if help.domain != testRegistry { - t.Errorf("Resolve().domain; got %v, want %v", help.domain, testRegistry) - } -} - -func TestNoConfig(t *testing.T) { - setupConfigDir(t) - - checkAnonymousFallback(t) -} - -func TestVariousPaths(t *testing.T) { - tests := []struct { - content string - check func(*testing.T) - }{{ - content: `}{`, - check: checkAnonymousFallback, - }, { - content: `{"credHelpers": {"https://test.io": "test"}}`, - check: checkHelper, - }, { - content: `{"credsStore": "test"}`, - check: checkHelper, - 
}, { - content: `{"auths": {"http://test.io/v2/": {"auth": "Zm9vOmJhcg=="}}}`, - check: checkFooBarOutput, - }, { - content: `{"auths": {"https://test.io/v1/": {"username": "foo", "password": "bar"}}}`, - check: checkFooBarOutput, - }, { - content: `{"auths": {"other.io": {"username": "asdf", "password": "fdsa"}}}`, - check: checkAnonymousFallback, - }} - - for _, test := range tests { - setupConfigFile(t, test.content) - - test.check(t) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/name/BUILD.bazel deleted file mode 100644 index 84d6e17c..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/BUILD.bazel +++ /dev/null @@ -1,29 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "check.go", - "digest.go", - "errors.go", - "ref.go", - "registry.go", - "repository.go", - "tag.go", - ], - importpath = "github.com/google/go-containerregistry/pkg/name", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = [ - "digest_test.go", - "ref_test.go", - "registry_test.go", - "repository_test.go", - "tag_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/google/go-containerregistry/pkg/name", -) diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/digest_test.go b/vendor/github.com/google/go-containerregistry/pkg/name/digest_test.go deleted file mode 100644 index 119a4308..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/digest_test.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import ( - "strings" - "testing" -) - -const validDigest = "sha256:deadb33fdeadb33fdeadb33fdeadb33fdeadb33fdeadb33fdeadb33fdeadb33f" - -var goodStrictValidationDigestNames = []string{ - "gcr.io/g-convoy/hello-world@" + validDigest, - "gcr.io/google.com/project-id/hello-world@" + validDigest, - "us.gcr.io/project-id/sub-repo@" + validDigest, - "example.text/foo/bar@" + validDigest, -} - -var goodWeakValidationDigestNames = []string{ - "namespace/pathcomponent/image@" + validDigest, - "library/ubuntu@" + validDigest, - "gcr.io/project-id/missing-digest@", -} - -var badDigestNames = []string{ - "gcr.io/project-id/unknown-alg@unknown:abc123", - "gcr.io/project-id/wrong-length@sha256:d34db33fd34db33f", -} - -func TestNewDigestStrictValidation(t *testing.T) { - t.Parallel() - - for _, name := range goodStrictValidationDigestNames { - if digest, err := NewDigest(name, StrictValidation); err != nil { - t.Errorf("`%s` should be a valid Digest name, got error: %v", name, err) - } else if digest.Name() != name { - t.Errorf("`%v` .Name() should reproduce the original name. Wanted: %s Got: %s", digest, name, digest.Name()) - } - } - - for _, name := range append(goodWeakValidationDigestNames, badDigestNames...) 
{ - if repo, err := NewDigest(name, StrictValidation); err == nil { - t.Errorf("`%s` should be an invalid Digest name, got Digest: %#v", name, repo) - } - } -} - -func TestNewDigest(t *testing.T) { - t.Parallel() - - for _, name := range append(goodStrictValidationDigestNames, goodWeakValidationDigestNames...) { - if _, err := NewDigest(name, WeakValidation); err != nil { - t.Errorf("`%s` should be a valid Digest name, got error: %v", name, err) - } - } - - for _, name := range badDigestNames { - if repo, err := NewDigest(name, WeakValidation); err == nil { - t.Errorf("`%s` should be an invalid Digest name, got Digest: %#v", name, repo) - } - } -} - -func TestDigestComponents(t *testing.T) { - t.Parallel() - testRegistry := "gcr.io" - testRepository := "project-id/image" - - digestNameStr := testRegistry + "/" + testRepository + "@" + validDigest - digest, err := NewDigest(digestNameStr, StrictValidation) - if err != nil { - t.Fatalf("`%s` should be a valid Digest name, got error: %v", digestNameStr, err) - } - - actualRegistry := digest.RegistryStr() - if actualRegistry != testRegistry { - t.Errorf("RegistryStr() was incorrect for %v. Wanted: `%s` Got: `%s`", digest, testRegistry, actualRegistry) - } - actualRepository := digest.RepositoryStr() - if actualRepository != testRepository { - t.Errorf("RepositoryStr() was incorrect for %v. Wanted: `%s` Got: `%s`", digest, testRepository, actualRepository) - } - actualDigest := digest.DigestStr() - if actualDigest != validDigest { - t.Errorf("DigestStr() was incorrect for %v. Wanted: `%s` Got: `%s`", digest, validDigest, actualDigest) - } -} - -func TestDigestScopes(t *testing.T) { - t.Parallel() - testRegistry := "gcr.io" - testRepo := "project-id/image" - testAction := "pull" - - expectedScope := strings.Join([]string{"repository", testRepo, testAction}, ":") - - digestNameStr := testRegistry + "/" + testRepo + "@" + validDigest - digest, err := NewDigest(digestNameStr, StrictValidation) - if err != nil { - t.Fatalf("`%s` should be a valid Digest name, got error: %v", digestNameStr, err) - } - - actualScope := digest.Scope(testAction) - if actualScope != expectedScope { - t.Errorf("scope was incorrect for %v. Wanted: `%s` Got: `%s`", digest, expectedScope, actualScope) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/ref_test.go b/vendor/github.com/google/go-containerregistry/pkg/name/ref_test.go deleted file mode 100644 index a594592c..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/ref_test.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package name - -import ( - "testing" -) - -func TestParseReference(t *testing.T) { - for _, name := range goodWeakValidationDigestNames { - ref, err := ParseReference(name, WeakValidation) - if err != nil { - t.Errorf("ParseReference(%q); %v", name, err) - } - dig, err := NewDigest(name, WeakValidation) - if err != nil { - t.Errorf("NewDigest(%q); %v", name, err) - } - if ref != dig { - t.Errorf("ParseReference(%q) != NewDigest(%q); got %v, want %v", name, name, ref, dig) - } - } - - for _, name := range goodStrictValidationDigestNames { - ref, err := ParseReference(name, StrictValidation) - if err != nil { - t.Errorf("ParseReference(%q); %v", name, err) - } - dig, err := NewDigest(name, StrictValidation) - if err != nil { - t.Errorf("NewDigest(%q); %v", name, err) - } - if ref != dig { - t.Errorf("ParseReference(%q) != NewDigest(%q); got %v, want %v", name, name, ref, dig) - } - } - - for _, name := range badDigestNames { - if _, err := ParseReference(name, WeakValidation); err == nil { - t.Errorf("ParseReference(%q); expected error, got none", name) - } - } - - for _, name := range goodWeakValidationTagNames { - ref, err := ParseReference(name, WeakValidation) - if err != nil { - t.Errorf("ParseReference(%q); %v", name, err) - } - tag, err := NewTag(name, WeakValidation) - if err != nil { - t.Errorf("NewTag(%q); %v", name, err) - } - if ref != tag { - t.Errorf("ParseReference(%q) != NewTag(%q); got %v, want %v", name, name, ref, tag) - } - } - - for _, name := range goodStrictValidationTagNames { - ref, err := ParseReference(name, StrictValidation) - if err != nil { - t.Errorf("ParseReference(%q); %v", name, err) - } - tag, err := NewTag(name, StrictValidation) - if err != nil { - t.Errorf("NewTag(%q); %v", name, err) - } - if ref != tag { - t.Errorf("ParseReference(%q) != NewTag(%q); got %v, want %v", name, name, ref, tag) - } - } - - for _, name := range badTagNames { - if _, err := ParseReference(name, WeakValidation); err == nil { - t.Errorf("ParseReference(%q); expected error, got none", name) - } - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/registry_test.go b/vendor/github.com/google/go-containerregistry/pkg/name/registry_test.go deleted file mode 100644 index 36ff808a..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/registry_test.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package name - -import ( - "testing" -) - -var goodStrictValidationRegistryNames = []string{ - "gcr.io", - "gcr.io:9001", - "index.docker.io", - "us.gcr.io", - "example.text", - "localhost", - "localhost:9090", -} - -var goodWeakValidationRegistryNames = []string{ - "", -} - -var badRegistryNames = []string{ - "white space", - "gcr?com", -} - -func TestNewRegistryStrictValidation(t *testing.T) { - t.Parallel() - - for _, name := range goodStrictValidationRegistryNames { - if registry, err := NewRegistry(name, StrictValidation); err != nil { - t.Errorf("`%s` should be a valid Registry name, got error: %v", name, err) - } else if registry.Name() != name { - t.Errorf("`%v` .Name() should reproduce the original name. Wanted: %s Got: %s", registry, name, registry.Name()) - } - } - - for _, name := range append(goodWeakValidationRegistryNames, badRegistryNames...) { - if repo, err := NewRegistry(name, StrictValidation); err == nil { - t.Errorf("`%s` should be an invalid Registry name, got Registry: %#v", name, repo) - } - } -} - -func TestNewRegistry(t *testing.T) { - t.Parallel() - - for _, name := range append(goodStrictValidationRegistryNames, goodWeakValidationRegistryNames...) { - if _, err := NewRegistry(name, WeakValidation); err != nil { - t.Errorf("`%s` should be a valid Registry name, got error: %v", name, err) - } - } - - for _, name := range badRegistryNames { - if repo, err := NewRegistry(name, WeakValidation); err == nil { - t.Errorf("`%s` should be an invalid Registry name, got Registry: %#v", name, repo) - } - } -} - -func TestDefaultRegistryNames(t *testing.T) { - testRegistries := []string{"docker.io", ""} - - for _, testRegistry := range testRegistries { - registry, err := NewRegistry(testRegistry, WeakValidation) - if err != nil { - t.Fatalf("`%s` should be a valid Registry name, got error: %v", testRegistry, err) - } - - actualRegistry := registry.RegistryStr() - if actualRegistry != DefaultRegistry { - t.Errorf("RegistryStr() was incorrect for %v. Wanted: `%s` Got: `%s`", registry, DefaultRegistry, actualRegistry) - } - } -} - -func TestRegistryComponents(t *testing.T) { - t.Parallel() - testRegistry := "gcr.io" - - registry, err := NewRegistry(testRegistry, StrictValidation) - if err != nil { - t.Fatalf("`%s` should be a valid Registry name, got error: %v", testRegistry, err) - } - - actualRegistry := registry.RegistryStr() - if actualRegistry != testRegistry { - t.Errorf("RegistryStr() was incorrect for %v. Wanted: `%s` Got: `%s`", registry, testRegistry, actualRegistry) - } -} - -func TestRegistryScopes(t *testing.T) { - t.Parallel() - testRegistry := "gcr.io" - testAction := "whatever" - - expectedScope := "registry:catalog:*" - - registry, err := NewRegistry(testRegistry, StrictValidation) - if err != nil { - t.Fatalf("`%s` should be a valid Registry name, got error: %v", testRegistry, err) - } - - actualScope := registry.Scope(testAction) - if actualScope != expectedScope { - t.Errorf("scope was incorrect for %v. Wanted: `%s` Got: `%s`", registry, expectedScope, actualScope) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/repository_test.go b/vendor/github.com/google/go-containerregistry/pkg/name/repository_test.go deleted file mode 100644 index b58489de..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/repository_test.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import ( - "strings" - "testing" -) - -var goodStrictValidationRepositoryNames = []string{ - "gcr.io/g-convoy/hello-world", - "gcr.io/google.com/project-id/hello-world", - "us.gcr.io/project-id/sub-repo", - "example.text/foo/bar", - "mirror.gcr.io/ubuntu", - "index.docker.io/library/ubuntu", -} - -var goodWeakValidationRepositoryNames = []string{ - "namespace/pathcomponent/image", - "library/ubuntu", - "ubuntu", -} - -var badRepositoryNames = []string{ - "white space", - "b@char/image", -} - -func TestNewRepositoryStrictValidation(t *testing.T) { - t.Parallel() - - for _, name := range goodStrictValidationRepositoryNames { - if repository, err := NewRepository(name, StrictValidation); err != nil { - t.Errorf("`%s` should be a valid Repository name, got error: %v", name, err) - } else if repository.Name() != name { - t.Errorf("`%v` .Name() should reproduce the original name. Wanted: %s Got: %s", repository, name, repository.Name()) - } - } - - for _, name := range append(goodWeakValidationRepositoryNames, badRepositoryNames...) { - if repo, err := NewRepository(name, StrictValidation); err == nil { - t.Errorf("`%s` should be an invalid repository name, got Repository: %#v", name, repo) - } - } -} - -func TestNewRepository(t *testing.T) { - t.Parallel() - - for _, name := range append(goodStrictValidationRepositoryNames, goodWeakValidationRepositoryNames...) { - if _, err := NewRepository(name, WeakValidation); err != nil { - t.Errorf("`%s` should be a valid repository name, got error: %v", name, err) - } - } - - for _, name := range badRepositoryNames { - if repo, err := NewRepository(name, WeakValidation); err == nil { - t.Errorf("`%s` should be an invalid repository name, got Repository: %#v", name, repo) - } - } -} - -func TestRepositoryComponents(t *testing.T) { - t.Parallel() - testRegistry := "gcr.io" - testRepository := "project-id/image" - - repositoryNameStr := testRegistry + "/" + testRepository - repository, err := NewRepository(repositoryNameStr, StrictValidation) - if err != nil { - t.Fatalf("`%s` should be a valid Repository name, got error: %v", repositoryNameStr, err) - } - - actualRegistry := repository.RegistryStr() - if actualRegistry != testRegistry { - t.Errorf("RegistryStr() was incorrect for %v. Wanted: `%s` Got: `%s`", repository, testRegistry, actualRegistry) - } - actualRepository := repository.RepositoryStr() - if actualRepository != testRepository { - t.Errorf("RepositoryStr() was incorrect for %v. 
Wanted: `%s` Got: `%s`", repository, testRepository, actualRepository) - } -} - -func TestRepositoryScopes(t *testing.T) { - t.Parallel() - testRegistry := "gcr.io" - testRepo := "project-id/image" - testAction := "pull" - - expectedScope := strings.Join([]string{"repository", testRepo, testAction}, ":") - - repositoryNameStr := testRegistry + "/" + testRepo - repository, err := NewRepository(repositoryNameStr, StrictValidation) - if err != nil { - t.Fatalf("`%s` should be a valid Repository name, got error: %v", repositoryNameStr, err) - } - - actualScope := repository.Scope(testAction) - if actualScope != expectedScope { - t.Errorf("scope was incorrect for %v. Wanted: `%s` Got: `%s`", repository, expectedScope, actualScope) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/tag_test.go b/vendor/github.com/google/go-containerregistry/pkg/name/tag_test.go deleted file mode 100644 index 1242888d..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/tag_test.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import ( - "strings" - "testing" -) - -var goodStrictValidationTagNames = []string{ - "gcr.io/g-convoy/hello-world:latest", - "gcr.io/google.com/g-convoy/hello-world:latest", - "gcr.io/project-id/with-nums:v2", - "us.gcr.io/project-id/image:with.period.in.tag", - "gcr.io/project-id/image:w1th-alpha_num3ric.PLUScaps", - "domain.with.port:9001/image:latest", -} - -var goodWeakValidationTagNames = []string{ - "namespace/pathcomponent/image", - "library/ubuntu", - "gcr.io/project-id/implicit-latest", - "www.example.test:12345/repo/path", -} - -var badTagNames = []string{ - "gcr.io/project-id/bad_chars:c@n'tuse", - "gcr.io/project-id/wrong-length:white space", - "gcr.io/project-id/too-many-chars:thisisthetagthatneverendsitgoesonandonmyfriendsomepeoplestartedtaggingitnotknowingwhatitwasandtheyllcontinuetaggingitforeverjustbecausethisisthetagthatneverends", -} - -func TestNewTagStrictValidation(t *testing.T) { - t.Parallel() - - for _, name := range goodStrictValidationTagNames { - if tag, err := NewTag(name, StrictValidation); err != nil { - t.Errorf("`%s` should be a valid Tag name, got error: %v", name, err) - } else if tag.Name() != name { - t.Errorf("`%v` .Name() should reproduce the original name. Wanted: %s Got: %s", tag, name, tag.Name()) - } - } - - for _, name := range append(goodWeakValidationTagNames, badTagNames...) { - if tag, err := NewTag(name, StrictValidation); err == nil { - t.Errorf("`%s` should be an invalid Tag name, got Tag: %#v", name, tag) - } - } -} - -func TestNewTag(t *testing.T) { - t.Parallel() - - for _, name := range append(goodStrictValidationTagNames, goodWeakValidationTagNames...) 
{ - if _, err := NewTag(name, WeakValidation); err != nil { - t.Errorf("`%s` should be a valid Tag name, got error: %v", name, err) - } - } - - for _, name := range badTagNames { - if tag, err := NewTag(name, WeakValidation); err == nil { - t.Errorf("`%s` should be an invalid Tag name, got Tag: %#v", name, tag) - } - } -} - -func TestTagComponents(t *testing.T) { - t.Parallel() - testRegistry := "gcr.io" - testRepository := "project-id/image" - testTag := "latest" - - tagNameStr := testRegistry + "/" + testRepository + ":" + testTag - tag, err := NewTag(tagNameStr, StrictValidation) - if err != nil { - t.Fatalf("`%s` should be a valid Tag name, got error: %v", tagNameStr, err) - } - - actualRegistry := tag.RegistryStr() - if actualRegistry != testRegistry { - t.Errorf("RegistryStr() was incorrect for %v. Wanted: `%s` Got: `%s`", tag, testRegistry, actualRegistry) - } - actualRepository := tag.RepositoryStr() - if actualRepository != testRepository { - t.Errorf("RepositoryStr() was incorrect for %v. Wanted: `%s` Got: `%s`", tag, testRepository, actualRepository) - } - actualTag := tag.TagStr() - if actualTag != testTag { - t.Errorf("TagStr() was incorrect for %v. Wanted: `%s` Got: `%s`", tag, testTag, actualTag) - } -} - -func TestTagScopes(t *testing.T) { - t.Parallel() - testRegistry := "gcr.io" - testRepo := "project-id/image" - testTag := "latest" - testAction := "pull" - - expectedScope := strings.Join([]string{"repository", testRepo, testAction}, ":") - - tagNameStr := testRegistry + "/" + testRepo + ":" + testTag - tag, err := NewTag(tagNameStr, StrictValidation) - if err != nil { - t.Fatalf("`%s` should be a valid Tag name, got error: %v", tagNameStr, err) - } - - actualScope := tag.Scope(testAction) - if actualScope != expectedScope { - t.Errorf("scope was incorrect for %v. Wanted: `%s` Got: `%s`", tag, expectedScope, actualScope) - } -} - -func TestAllDefaults(t *testing.T) { - tagNameStr := "ubuntu" - tag, err := NewTag(tagNameStr, WeakValidation) - if err != nil { - t.Fatalf("`%s` should be a valid Tag name, got error: %v", tagNameStr, err) - } - - expectedName := "index.docker.io/library/ubuntu:latest" - actualName := tag.Name() - if actualName != expectedName { - t.Errorf("Name() was incorrect for %v. 
Wanted: `%s` Got: `%s`", tag, expectedName, actualName) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/v1/BUILD.bazel deleted file mode 100644 index da947b3b..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/BUILD.bazel +++ /dev/null @@ -1,28 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "config.go", - "doc.go", - "hash.go", - "image.go", - "layer.go", - "manifest.go", - "zz_deepcopy_generated.go", - ], - importpath = "github.com/google/go-containerregistry/pkg/v1", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/google/go-containerregistry/pkg/v1/types:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = [ - "hash_test.go", - "manifest_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/google/go-containerregistry/pkg/v1", - deps = ["//vendor/github.com/google/go-cmp/cmp:go_default_library"], -) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/BUILD.bazel deleted file mode 100644 index 0a8ad93b..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/BUILD.bazel +++ /dev/null @@ -1,39 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "image.go", - "write.go", - ], - data = [ - "//pkg/v1/tarball:test_image_1.tar", # keep - ], - importpath = "github.com/google/go-containerregistry/pkg/v1/daemon", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/docker/docker/api/types:go_default_library", - "//vendor/github.com/docker/docker/client:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/name:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/tarball:go_default_library", - "//vendor/github.com/pkg/errors:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "image_test.go", - "write_test.go", - ], - data = ["//pkg/v1/tarball:test_image_1.tar"], # keep - embed = [":go_default_library"], - importpath = "github.com/google/go-containerregistry/pkg/v1/daemon", - deps = [ - "//vendor/github.com/docker/docker/api/types:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/name:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/tarball:go_default_library", - ], -) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/image_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/image_test.go deleted file mode 100644 index 5becaaab..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/image_test.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package daemon - -import ( - "context" - "io" - "os" - "reflect" - "testing" - - "github.com/google/go-containerregistry/pkg/name" - - "github.com/google/go-containerregistry/pkg/v1/tarball" -) - -var imagePath = "../tarball/test_image_1.tar" - -type MockImageSaver struct { - path string -} - -func (m *MockImageSaver) ImageSave(_ context.Context, _ []string) (io.ReadCloser, error) { - return os.Open(m.path) -} - -func init() { - getImageSaver = func() (ImageSaver, error) { - return &MockImageSaver{path: imagePath}, nil - } -} - -func TestImage(t *testing.T) { - testImage, err := tarball.ImageFromPath(imagePath, nil) - if err != nil { - t.Fatalf("error loading test image: %s", err) - } - - tag, err := name.NewTag("unused", name.WeakValidation) - if err != nil { - t.Fatalf("error creating test name: %s", err) - } - - runTest := func(buffered bool) { - daemonImage, err := Image(tag, &ReadOptions{Buffer: buffered}) - if err != nil { - t.Errorf("Error loading daemon image: %s", err) - } - - dmfst, err := daemonImage.Manifest() - if err != nil { - t.Errorf("Error getting daemon manifest: %s", err) - } - tmfst, err := testImage.Manifest() - if err != nil { - t.Errorf("Error getting test manifest: %s", err) - } - if !reflect.DeepEqual(dmfst, tmfst) { - t.Errorf("%v != %v", testImage, daemonImage) - } - } - - runTest(false) - runTest(true) - -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/write_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/write_test.go deleted file mode 100644 index 0715542c..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/write_test.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package daemon - -import ( - "context" - "io" - "io/ioutil" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/tarball" -) - -type MockImageLoader struct{} - -func (m *MockImageLoader) ImageLoad(_ context.Context, _ io.Reader, _ bool) (types.ImageLoadResponse, error) { - return types.ImageLoadResponse{ - Body: ioutil.NopCloser(strings.NewReader("Loaded")), - }, nil -} - -func init() { - GetImageLoader = func() (ImageLoader, error) { - return &MockImageLoader{}, nil - } -} - -func TestWriteImage(t *testing.T) { - image, err := tarball.ImageFromPath("../tarball/test_image_1.tar", nil) - if err != nil { - t.Errorf("Error loading image: %v", err.Error()) - } - tag, err := name.NewTag("test_image_2:latest", name.WeakValidation) - if err != nil { - t.Errorf(err.Error()) - } - response, err := Write(tag, image, WriteOptions{}) - if err != nil { - t.Errorf("Error writing image tar: %s", err.Error()) - } - if !strings.Contains(response, "Loaded") { - t.Errorf("Error loading image. 
Response: %s", response) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/BUILD.bazel deleted file mode 100644 index 200c12d9..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "image.go", - ], - importpath = "github.com/google/go-containerregistry/pkg/v1/empty", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/google/go-containerregistry/pkg/v1/random:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = ["image_test.go"], - embed = [":go_default_library"], - importpath = "github.com/google/go-containerregistry/pkg/v1/empty", -) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/image_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/image_test.go deleted file mode 100644 index a2eb64fd..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/image_test.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package empty - -import ( - "testing" -) - -func TestManifestAndConfig(t *testing.T) { - manifest, err := Image.Manifest() - if err != nil { - t.Fatalf("Error loading manifest: %v", err) - } - if got, want := len(manifest.Layers), 0; got != want { - t.Fatalf("num layers; got %v, want %v", got, want) - } - - config, err := Image.ConfigFile() - if err != nil { - t.Fatalf("Error loading config file: %v", err) - } - if got, want := len(config.RootFS.DiffIDs), 0; got != want { - t.Fatalf("num diff ids; got %v, want %v", got, want) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/hash_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/hash_test.go deleted file mode 100644 index cfa81137..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/hash_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v1 - -import ( - "strings" - "testing" -) - -func TestGoodHashes(t *testing.T) { - good := []string{ - "sha256:deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", - "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", - } - - for _, s := range good { - h, err := NewHash(s) - if err != nil { - t.Errorf("Unexpected error parsing hash: %v", err) - } - if got, want := h.String(), s; got != want { - t.Errorf("String(); got %q, want %q", got, want) - } - } -} - -func TestBadHashes(t *testing.T) { - bad := []string{ - // Too short - "sha256:deadbeef", - // Bad character - "sha256:o123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", - // Unknown algorithm - "md5:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", - // Too few parts - "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", - // Too many parts - "md5:sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", - } - - for _, s := range bad { - h, err := NewHash(s) - if err == nil { - t.Errorf("Expected error, got: %v", h) - } - } -} - -func TestSHA256(t *testing.T) { - input := "asdf" - h, n, err := SHA256(strings.NewReader(input)) - if err != nil { - t.Errorf("SHA256(asdf) = %v", err) - } - if got, want := h.Algorithm, "sha256"; got != want { - t.Errorf("Algorithm; got %v, want %v", got, want) - } - if got, want := h.Hex, "f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b"; got != want { - t.Errorf("Hex; got %v, want %v", got, want) - } - if got, want := n, int64(len(input)); got != want { - t.Errorf("n; got %v, want %v", got, want) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/manifest_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/manifest_test.go deleted file mode 100644 index 42a4b1a1..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/manifest_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v1 - -import ( - "strings" - "testing" - - "github.com/google/go-cmp/cmp" -) - -func TestGoodManifestSimple(t *testing.T) { - got, err := ParseManifest(strings.NewReader(`{}`)) - if err != nil { - t.Errorf("Unexpected error parsing manifest: %v", err) - } - - want := Manifest{} - if diff := cmp.Diff(want, *got); diff != "" { - t.Errorf("ParseManifest({}); (-want +got) %s", diff) - } -} - -func TestGoodManifestWithHash(t *testing.T) { - good, err := ParseManifest(strings.NewReader(`{ - "config": { - "digest": "sha256:deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" - } -}`)) - if err != nil { - t.Errorf("Unexpected error parsing manifest: %v", err) - } - - if got, want := good.Config.Digest.Algorithm, "sha256"; got != want { - t.Errorf("ParseManifest().Config.Digest.Algorithm; got %v, want %v", got, want) - } -} - -func TestManifestWithBadHash(t *testing.T) { - bad, err := ParseManifest(strings.NewReader(`{ - "config": { - "digest": "sha256:deadbeed" - } -}`)) - if err == nil { - t.Errorf("Expected error parsing manifest, but got: %v", bad) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/BUILD.bazel deleted file mode 100644 index 71c13bb5..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/BUILD.bazel +++ /dev/null @@ -1,97 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "mutate.go", - "rebase.go", - ], - importpath = "github.com/google/go-containerregistry/pkg/v1/mutate", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/google/go-containerregistry/pkg/v1:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/empty:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/partial:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/types:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "mutate_test.go", - "rebase_test.go", - ], - data = glob(["testdata/**"]) + [ - ":whiteout_image.tar", - ":overwritten_file.tar", - ":source_image.tar", # keep - ], - embed = [":go_default_library"], - importpath = "github.com/google/go-containerregistry/pkg/v1/mutate", - deps = [ - "//vendor/github.com/google/go-cmp/cmp:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/random:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/tarball:go_default_library", - ], -) - -load( - "@io_bazel_rules_docker//container:container.bzl", - "container_image", - "container_layer", -) - -container_image( - name = "source_image", - files = [ - "testdata/bar", - "testdata/foo", - ], -) - -container_layer( - name = "base_layer", - files = [ - "testdata/whiteout/bar.txt", - "testdata/whiteout/foo.txt", - ], -) - -container_layer( - name = "whiteout_layer", - files = [ - "testdata/whiteout/.wh.foo.txt", - ], -) - -container_image( - name = "whiteout_image", - layers = [ - ":base_layer", - ":whiteout_layer", - ], -) - -container_layer( - name = "unmodified_foo", - files = [ - "testdata/whiteout/foo.txt", - ], -) - -container_layer( - name = "symlinked_foo", - files = ["testdata/whiteout/bar.txt"], - symlinks = {"foo.txt": "bar.txt"}, -) - -container_image( - name = "overwritten_file", - layers = [ - ":unmodified_foo", - 
":symlinked_foo", - ], -) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate_test.go deleted file mode 100644 index e4c87627..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate_test.go +++ /dev/null @@ -1,446 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mutate - -import ( - "archive/tar" - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "strings" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/tarball" -) - -func TestExtractWhiteout(t *testing.T) { - img, err := tarball.ImageFromPath("whiteout_image.tar", nil) - if err != nil { - t.Errorf("Error loading image: %v", err) - } - tarPath, _ := filepath.Abs("img.tar") - defer os.Remove(tarPath) - tr := tar.NewReader(Extract(img)) - for { - header, err := tr.Next() - if err == io.EOF { - break - } - name := header.Name - for _, part := range filepath.SplitList(name) { - if part == "foo" { - t.Errorf("whiteout file found in tar: %v", name) - } - } - } -} - -func TestExtractOverwrittenFile(t *testing.T) { - img, err := tarball.ImageFromPath("overwritten_file.tar", nil) - if err != nil { - t.Fatalf("Error loading image: %v", err) - } - tr := tar.NewReader(Extract(img)) - for { - header, err := tr.Next() - if err == io.EOF { - break - } - name := header.Name - if strings.Contains(name, "foo.txt") { - var buf bytes.Buffer - buf.ReadFrom(tr) - if strings.Contains(buf.String(), "foo") { - t.Errorf("Contents of file were not correctly overwritten") - } - } - } -} - -// TestExtractError tests that if there are any errors encountered -func TestExtractError(t *testing.T) { - rc := Extract(invalidImage{}) - if _, err := io.Copy(ioutil.Discard, rc); err == nil { - t.Errorf("rc.Read; got nil error") - } else if !strings.Contains(err.Error(), errInvalidImage.Error()) { - t.Errorf("rc.Read; got %v, want %v", err, errInvalidImage) - } -} - -// TestExtractPartialRead tests that the reader can be partially read (e.g., -// tar headers) and closed without error. -func TestExtractPartialRead(t *testing.T) { - rc := Extract(invalidImage{}) - if _, err := io.Copy(ioutil.Discard, io.LimitReader(rc, 1)); err != nil { - t.Errorf("Could not read one byte from reader") - } - if err := rc.Close(); err != nil { - t.Errorf("rc.Close: %v", err) - } -} - -// invalidImage is an image which returns an error when Layers() is called. 
-type invalidImage struct { - v1.Image -} - -var errInvalidImage = errors.New("invalid image") - -func (invalidImage) Layers() ([]v1.Layer, error) { - return nil, errInvalidImage -} - -func TestWhiteoutDir(t *testing.T) { - fsMap := map[string]bool{ - "baz": true, - "red/blue": true, - } - var tests = []struct { - path string - whiteout bool - }{ - {"usr/bin", false}, - {"baz/foo.txt", true}, - {"baz/bar/foo.txt", true}, - {"red/green", false}, - {"red/yellow.txt", false}, - } - - for _, tt := range tests { - whiteout := inWhiteoutDir(fsMap, tt.path) - if whiteout != tt.whiteout { - t.Errorf("Whiteout %s: expected %v, but got %v", tt.path, tt.whiteout, whiteout) - } - } -} - -func TestNoopCondition(t *testing.T) { - source := sourceImage(t) - - result, err := AppendLayers(source, []v1.Layer{}...) - if err != nil { - t.Fatalf("Unexpected error creating a writable image: %v", err) - } - - if !manifestsAreEqual(t, source, result) { - t.Error("manifests are not the same") - } - - if !configFilesAreEqual(t, source, result) { - t.Fatal("config files are not the same") - } -} - -func TestAppendWithHistory(t *testing.T) { - source := sourceImage(t) - - addendum := Addendum{ - Layer: mockLayer{}, - History: v1.History{ - Author: "dave", - }, - } - - result, err := Append(source, addendum) - if err != nil { - t.Fatalf("failed to append: %v", err) - } - - layers := getLayers(t, result) - - if diff := cmp.Diff(layers[1], mockLayer{}); diff != "" { - t.Fatalf("correct layer was not appended (-got, +want) %v", diff) - } - - if configSizesAreEqual(t, source, result) { - t.Fatal("adding a layer MUST change the config file size") - } - - cf := getConfigFile(t, result) - - if diff := cmp.Diff(cf.History[1], addendum.History); diff != "" { - t.Fatalf("the appended history is not the same (-got, +want) %s", diff) - } -} - -func TestAppendLayers(t *testing.T) { - source := sourceImage(t) - result, err := AppendLayers(source, mockLayer{}) - if err != nil { - t.Fatalf("failed to append a layer: %v", err) - } - - if manifestsAreEqual(t, source, result) { - t.Fatal("appending a layer did not mutate the manifest") - } - - if configFilesAreEqual(t, source, result) { - t.Fatal("appending a layer did not mutate the config file") - } - - if configSizesAreEqual(t, source, result) { - t.Fatal("adding a layer MUST change the config file size") - } - - layers := getLayers(t, result) - - if got, want := len(layers), 2; got != want { - t.Fatalf("Layers did not return the appended layer "+ - "- got size %d; expected 2", len(layers)) - } - - if diff := cmp.Diff(layers[1], mockLayer{}); diff != "" { - t.Fatalf("correct layer was not appended (-got, +want) %v", diff) - } - - assertLayerOrderMatchesConfig(t, result) - assertLayerOrderMatchesManifest(t, result) - assertQueryingForLayerSucceeds(t, result, layers[1]) -} - -func TestMutateConfig(t *testing.T) { - source := sourceImage(t) - cfg, err := source.ConfigFile() - if err != nil { - t.Fatalf("error getting source config file") - } - - newEnv := []string{"foo=bar"} - cfg.Config.Env = newEnv - result, err := Config(source, cfg.Config) - if err != nil { - t.Fatalf("failed to mutate a config: %v", err) - } - - if manifestsAreEqual(t, source, result) { - t.Fatal("mutating the config MUST mutate the manifest") - } - - if configFilesAreEqual(t, source, result) { - t.Fatal("mutating the config did not mutate the config file") - } - - if configSizesAreEqual(t, source, result) { - t.Fatal("adding an enviornment variable MUST change the config file size") - } - - if 
!reflect.DeepEqual(cfg.Config.Env, newEnv) { - t.Fatalf("incorrect environment set %v!=%v", cfg.Config.Env, newEnv) - } -} - -func TestMutateCreatedAt(t *testing.T) { - source := sourceImage(t) - want := time.Now().Add(-2 * time.Minute) - result, err := CreatedAt(source, v1.Time{want}) - if err != nil { - t.Fatalf("failed to mutate a config: %v", err) - } - - if configDigestsAreEqual(t, source, result) { - t.Fatal("mutating the created time MUST mutate the config digest") - } - - got := getConfigFile(t, result).Created.Time - if got != want { - t.Fatal("mutating the created time MUST mutate the time from %v to %v", got, want) - } -} - -func assertQueryingForLayerSucceeds(t *testing.T, image v1.Image, layer v1.Layer) { - t.Helper() - - queryTestCases := []struct { - name string - expectedLayer v1.Layer - hash func() (v1.Hash, error) - query func(v1.Hash) (v1.Layer, error) - }{ - {"digest", layer, layer.Digest, image.LayerByDigest}, - {"diff id", layer, layer.DiffID, image.LayerByDiffID}, - } - - for _, tc := range queryTestCases { - t.Run(fmt.Sprintf("layer by %s", tc.name), func(t *testing.T) { - hash, err := tc.hash() - if err != nil { - t.Fatalf("Unable to fetch %s for layer: %v", tc.name, err) - } - - gotLayer, err := tc.query(hash) - if err != nil { - t.Fatalf("Unable to fetch layer from %s: %v", tc.name, err) - } - - if gotLayer != tc.expectedLayer { - t.Fatalf("Querying layer using %s does not return the expected layer %+v %+v", tc.name, gotLayer, tc.expectedLayer) - } - }) - } - -} - -func sourceImage(t *testing.T) v1.Image { - t.Helper() - - image, err := tarball.ImageFromPath("source_image.tar", nil) - if err != nil { - t.Fatalf("Error loading image: %v", err) - } - return image -} - -func getManifest(t *testing.T, i v1.Image) *v1.Manifest { - t.Helper() - - m, err := i.Manifest() - if err != nil { - t.Fatalf("Error fetching image manifest: %v", err) - } - - return m -} - -func getLayers(t *testing.T, i v1.Image) []v1.Layer { - t.Helper() - - l, err := i.Layers() - if err != nil { - t.Fatalf("Error fetching image layers: %v", err) - } - - return l -} - -func getConfigFile(t *testing.T, i v1.Image) *v1.ConfigFile { - t.Helper() - - c, err := i.ConfigFile() - if err != nil { - t.Fatalf("Error fetching image config file: %v", err) - } - - return c -} - -func configFilesAreEqual(t *testing.T, first, second v1.Image) bool { - t.Helper() - - fc := getConfigFile(t, first) - sc := getConfigFile(t, second) - - return cmp.Equal(fc, sc) -} - -func configDigestsAreEqual(t *testing.T, first, second v1.Image) bool { - t.Helper() - - fm := getManifest(t, first) - sm := getManifest(t, second) - - return fm.Config.Digest == sm.Config.Digest -} - -func configSizesAreEqual(t *testing.T, first, second v1.Image) bool { - t.Helper() - - fm := getManifest(t, first) - sm := getManifest(t, second) - - return fm.Config.Size == sm.Config.Size -} - -func manifestsAreEqual(t *testing.T, first, second v1.Image) bool { - t.Helper() - - fm := getManifest(t, first) - sm := getManifest(t, second) - - return cmp.Equal(fm, sm) -} - -func assertLayerOrderMatchesConfig(t *testing.T, i v1.Image) { - t.Helper() - - layers := getLayers(t, i) - cf := getConfigFile(t, i) - - if got, want := len(layers), len(cf.RootFS.DiffIDs); got != want { - t.Fatalf("Difference in size between the image layers (%d) "+ - "and the config file diff ids (%d)", got, want) - } - - for i := range layers { - diffID, err := layers[i].DiffID() - if err != nil { - t.Fatalf("Unable to fetch layer diff id: %v", err) - } - - if got, want := diffID, 
cf.RootFS.DiffIDs[i]; got != want { - t.Fatalf("Layer diff id (%v) is not at the expected index (%d) in %+v", - got, i, cf.RootFS.DiffIDs) - } - } -} - -func assertLayerOrderMatchesManifest(t *testing.T, i v1.Image) { - t.Helper() - - layers := getLayers(t, i) - mf := getManifest(t, i) - - if got, want := len(layers), len(mf.Layers); got != want { - t.Fatalf("Difference in size between the image layers (%d) "+ - "and the manifest layers (%d)", got, want) - } - - for i := range layers { - digest, err := layers[i].Digest() - if err != nil { - t.Fatalf("Unable to fetch layer diff id: %v", err) - } - - if got, want := digest, mf.Layers[i].Digest; got != want { - t.Fatalf("Layer digest (%v) is not at the expected index (%d) in %+v", - got, i, mf.Layers) - } - } -} - -type mockLayer struct{} - -func (m mockLayer) Digest() (v1.Hash, error) { - return v1.Hash{Algorithm: "fake", Hex: "digest"}, nil -} - -func (m mockLayer) DiffID() (v1.Hash, error) { - return v1.Hash{Algorithm: "fake", Hex: "diff id"}, nil -} - -func (m mockLayer) Size() (int64, error) { return 137438691328, nil } -func (m mockLayer) Compressed() (io.ReadCloser, error) { - return ioutil.NopCloser(strings.NewReader("compressed times")), nil -} -func (m mockLayer) Uncompressed() (io.ReadCloser, error) { - return ioutil.NopCloser(strings.NewReader("uncompressed")), nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase_test.go deleted file mode 100644 index 418b106a..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase_test.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mutate - -import ( - "testing" - "time" - - "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/random" -) - -func layerDigests(t *testing.T, img v1.Image) []string { - var layerDigests []string - layers, err := img.Layers() - if err != nil { - t.Fatalf("oldBase.Layers: %v", err) - } - for i, l := range layers { - dig, err := l.Digest() - if err != nil { - t.Fatalf("layer.Digest %d: %v", i, err) - } - t.Log(dig) - layerDigests = append(layerDigests, dig.String()) - } - return layerDigests -} - -// TestRebase tests that layer digests are expected when performing a rebase on -// random.Image layers. -func TestRebase(t *testing.T) { - // Create a random old base image of 5 layers and get those layers' digests. - oldBase, err := random.Image(100, 5) - if err != nil { - t.Fatalf("random.Image (oldBase): %v", err) - } - t.Log("Old base:") - _ = layerDigests(t, oldBase) - - // Construct an image with 1 layer on top of oldBase. 
- top, err := random.Image(100, 1) - if err != nil { - t.Fatalf("random.Image (top): %v", err) - } - topLayers, err := top.Layers() - if err != nil { - t.Fatalf("top.Layers: %v", err) - } - topHistory := v1.History{ - Author: "me", - Created: v1.Time{time.Now()}, - CreatedBy: "test", - Comment: "this is a test", - } - orig, err := Append(oldBase, Addendum{ - Layer: topLayers[0], - History: topHistory, - }) - if err != nil { - t.Fatalf("Append: %v", err) - } - - t.Log("Original:") - origLayerDigests := layerDigests(t, orig) - - // Create a random new base image of 3 layers. - newBase, err := random.Image(100, 3) - if err != nil { - t.Fatalf("random.Image (newBase): %v", err) - } - t.Log("New base:") - newBaseLayerDigests := layerDigests(t, newBase) - - // Rebase original image onto new base. - rebased, err := Rebase(orig, oldBase, newBase, nil) - if err != nil { - t.Fatalf("Rebase: %v", err) - } - - var rebasedLayerDigests []string - rebasedBaseLayers, err := rebased.Layers() - if err != nil { - t.Fatalf("rebased.Layers: %v", err) - } - t.Log("Rebased image layer digests:") - for i, l := range rebasedBaseLayers { - dig, err := l.Digest() - if err != nil { - t.Fatalf("layer.Digest (rebased base layer %d): %v", i, err) - } - t.Log(dig) - rebasedLayerDigests = append(rebasedLayerDigests, dig.String()) - } - - // Compare rebased layers. - wantLayerDigests := append(newBaseLayerDigests, origLayerDigests[len(origLayerDigests)-1]) - if len(rebasedLayerDigests) != len(wantLayerDigests) { - t.Fatalf("Rebased image contained %d layers, want %d", len(rebasedLayerDigests), len(wantLayerDigests)) - } - for i, rl := range rebasedLayerDigests { - if got, want := rl, wantLayerDigests[i]; got != want { - t.Errorf("Layer %d mismatch, got %q, want %q", i, got, want) - } - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/BUILD.bazel deleted file mode 100644 index 73411c0b..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "compressed.go", - "doc.go", - "image.go", - "uncompressed.go", - "with.go", - ], - importpath = "github.com/google/go-containerregistry/pkg/v1/partial", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/google/go-containerregistry/pkg/v1:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/types:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/v1util:go_default_library", - ], -) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/random/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/v1/random/BUILD.bazel deleted file mode 100644 index 192e24e7..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/random/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "image.go", - ], - importpath = "github.com/google/go-containerregistry/pkg/v1/random", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/google/go-containerregistry/pkg/v1:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/partial:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/types:go_default_library", - 
"//vendor/github.com/google/go-containerregistry/pkg/v1/v1util:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["image_test.go"], - embed = [":go_default_library"], - importpath = "github.com/google/go-containerregistry/pkg/v1/random", -) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/random/image_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/random/image_test.go deleted file mode 100644 index c794b96c..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/random/image_test.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package random - -import ( - "archive/tar" - "io" - "io/ioutil" - "testing" -) - -func TestManifestAndConfig(t *testing.T) { - want := int64(12) - img, err := Image(1024, want) - if err != nil { - t.Fatalf("Error loading image: %v", err) - } - manifest, err := img.Manifest() - if err != nil { - t.Fatalf("Error loading manifest: %v", err) - } - if got := int64(len(manifest.Layers)); got != want { - t.Fatalf("num layers; got %v, want %v", got, want) - } - - config, err := img.ConfigFile() - if err != nil { - t.Fatalf("Error loading config file: %v", err) - } - if got := int64(len(config.RootFS.DiffIDs)); got != want { - t.Fatalf("num diff ids; got %v, want %v", got, want) - } -} - -func TestTarLayer(t *testing.T) { - img, err := Image(1024, 5) - if err != nil { - t.Fatalf("Image: %v", err) - } - layers, err := img.Layers() - if err != nil { - t.Fatalf("Layers: %v", err) - } - if len(layers) != 5 { - t.Errorf("Got %d layers, want 5", len(layers)) - } - for i, l := range layers { - rc, err := l.Uncompressed() - if err != nil { - t.Errorf("Uncompressed(%d): %v", i, err) - } - defer rc.Close() - tr := tar.NewReader(rc) - if _, err := tr.Next(); err != nil { - t.Errorf("tar.Next: %v", err) - } - - if n, err := io.Copy(ioutil.Discard, tr); err != nil { - t.Errorf("Reading tar layer: %v", err) - } else if n != 1024 { - t.Errorf("Layer %d was %d bytes, want 1024", i, n) - } - - if _, err := tr.Next(); err != io.EOF { - t.Errorf("Layer contained more files; got %v, want EOF", err) - } - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/BUILD.bazel deleted file mode 100644 index 1f5b35e1..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/BUILD.bazel +++ /dev/null @@ -1,48 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "delete.go", - "doc.go", - "error.go", - "image.go", - "list.go", - "mount.go", - "write.go", - ], - importpath = "github.com/google/go-containerregistry/pkg/v1/remote", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/google/go-containerregistry/pkg/authn:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/name:go_default_library", - 
"//vendor/github.com/google/go-containerregistry/pkg/v1:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/partial:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/types:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/v1util:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "delete_test.go", - "error_test.go", - "image_test.go", - "list_test.go", - "write_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/google/go-containerregistry/pkg/v1/remote", - deps = [ - "//vendor/github.com/google/go-cmp/cmp:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/authn:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/name:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/random:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/types:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/v1util:go_default_library", - ], -) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete_test.go deleted file mode 100644 index fd825c07..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete_test.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package remote - -import ( - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "testing" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" -) - -func TestDelete(t *testing.T) { - expectedRepo := "write/time" - manifestPath := fmt.Sprintf("/v2/%s/manifests/latest", expectedRepo) - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/v2/": - w.WriteHeader(http.StatusOK) - case manifestPath: - if r.Method != http.MethodDelete { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodDelete) - } - http.Error(w, "Deleted", http.StatusOK) - default: - t.Fatalf("Unexpected path: %v", r.URL.Path) - } - })) - defer server.Close() - u, err := url.Parse(server.URL) - if err != nil { - t.Fatalf("url.Parse(%v) = %v", server.URL, err) - } - tag, err := name.NewTag(fmt.Sprintf("%s/%s:latest", u.Host, expectedRepo), name.WeakValidation) - if err != nil { - t.Fatalf("NewTag() = %v", err) - } - - if err := Delete(tag, authn.Anonymous, http.DefaultTransport, DeleteOptions{}); err != nil { - t.Errorf("Delete() = %v", err) - } -} - -func TestDeleteBadStatus(t *testing.T) { - expectedRepo := "write/time" - manifestPath := fmt.Sprintf("/v2/%s/manifests/latest", expectedRepo) - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/v2/": - w.WriteHeader(http.StatusOK) - case manifestPath: - if r.Method != http.MethodDelete { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodDelete) - } - http.Error(w, "Boom Goes Server", http.StatusInternalServerError) - default: - t.Fatalf("Unexpected path: %v", r.URL.Path) - } - })) - defer server.Close() - u, err := url.Parse(server.URL) - if err != nil { - t.Fatalf("url.Parse(%v) = %v", server.URL, err) - } - tag, err := name.NewTag(fmt.Sprintf("%s/%s:latest", u.Host, expectedRepo), name.WeakValidation) - if err != nil { - t.Fatalf("NewTag() = %v", err) - } - - if err := Delete(tag, authn.Anonymous, http.DefaultTransport, DeleteOptions{}); err == nil { - t.Error("Delete() = nil; wanted error") - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/error_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/error_test.go deleted file mode 100644 index 00817884..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/error_test.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package remote - -import ( - "bytes" - "encoding/json" - "net/http" - "testing" - - "github.com/google/go-cmp/cmp" - - "github.com/google/go-containerregistry/pkg/v1/v1util" -) - -func TestCheckErrorNil(t *testing.T) { - tests := []int{ - http.StatusOK, - http.StatusAccepted, - http.StatusCreated, - http.StatusMovedPermanently, - http.StatusInternalServerError, - } - - for _, code := range tests { - resp := &http.Response{StatusCode: code} - - if err := checkError(resp, code); err != nil { - t.Errorf("checkError(%d) = %v", code, err) - } - } -} - -func TestCheckErrorNotError(t *testing.T) { - tests := []struct { - code int - body string - }{{ - code: http.StatusBadRequest, - body: "", - }, { - code: http.StatusUnauthorized, - body: "Not JSON", - }} - - for _, test := range tests { - resp := &http.Response{ - StatusCode: test.code, - Body: v1util.NopReadCloser(bytes.NewBufferString(test.body)), - } - - if err := checkError(resp, http.StatusOK); err == nil { - t.Errorf("checkError(%d, %s) = nil, wanted error", test.code, test.body) - } else if se, ok := err.(*Error); ok { - t.Errorf("checkError(%d, %s) = %v, wanted another type", test.code, test.body, se) - } - } -} - -func TestCheckErrorWithError(t *testing.T) { - tests := []struct { - code int - error *Error - }{{ - code: http.StatusBadRequest, - error: &Error{ - Errors: []Diagnostic{{ - Code: NameInvalidErrorCode, - Message: "a message for you", - }}, - }, - }, { - code: http.StatusBadRequest, - error: &Error{}, - }, { - code: http.StatusBadRequest, - error: &Error{ - Errors: []Diagnostic{{ - Code: NameInvalidErrorCode, - Message: "a message for you", - }, { - Code: SizeInvalidErrorCode, - Message: "another message for you", - }}, - }, - }} - - for _, test := range tests { - b, err := json.Marshal(test.error) - if err != nil { - t.Errorf("json.Marshal(%v) = %v", test.error, err) - } - resp := &http.Response{ - StatusCode: test.code, - Body: v1util.NopReadCloser(bytes.NewBuffer(b)), - } - - if err := checkError(resp, http.StatusOK); err == nil { - t.Errorf("checkError(%d, %s) = nil, wanted error", test.code, string(b)) - } else if se, ok := err.(*Error); !ok { - t.Errorf("checkError(%d, %s) = %T, wanted *remote.Error", test.code, string(b), se) - } else if diff := cmp.Diff(test.error, se); diff != "" { - t.Errorf("checkError(%d, %s); (-want +got) %s", test.code, string(b), diff) - } - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image_test.go deleted file mode 100644 index cecc21b1..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image_test.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package remote - -import ( - "bytes" - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "testing" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/random" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -const bogusDigest = "sha256:deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" - -func mustDigest(t *testing.T, img v1.Image) v1.Hash { - h, err := img.Digest() - if err != nil { - t.Fatalf("Digest() = %v", err) - } - return h -} - -func mustManifest(t *testing.T, img v1.Image) *v1.Manifest { - m, err := img.Manifest() - if err != nil { - t.Fatalf("Manifest() = %v", err) - } - return m -} - -func mustRawManifest(t *testing.T, img v1.Image) []byte { - m, err := img.RawManifest() - if err != nil { - t.Fatalf("RawManifest() = %v", err) - } - return m -} - -func mustRawConfigFile(t *testing.T, img v1.Image) []byte { - c, err := img.RawConfigFile() - if err != nil { - t.Fatalf("RawConfigFile() = %v", err) - } - return c -} - -func randomImage(t *testing.T) v1.Image { - rnd, err := random.Image(1024, 1) - if err != nil { - t.Fatalf("random.Image() = %v", err) - } - return rnd -} - -func newReference(host, repo, ref string) (name.Reference, error) { - tag, err := name.NewTag(fmt.Sprintf("%s/%s:%s", host, repo, ref), name.WeakValidation) - if err == nil { - return tag, nil - } - return name.NewDigest(fmt.Sprintf("%s/%s@%s", host, repo, ref), name.WeakValidation) -} - -// TODO(jonjohnsonjr): Make this real. -func TestMediaType(t *testing.T) { - img := remoteImage{} - got, err := img.MediaType() - if err != nil { - t.Fatalf("MediaType() = %v", err) - } - want := types.DockerManifestSchema2 - if got != want { - t.Errorf("MediaType() = %v, want %v", got, want) - } -} - -func TestRawManifestDigests(t *testing.T) { - img := randomImage(t) - expectedRepo := "foo/bar" - - cases := []struct { - name string - ref string - responseBody []byte - contentDigest string - wantErr bool - }{{ - name: "normal pull, by tag", - ref: "latest", - responseBody: mustRawManifest(t, img), - contentDigest: mustDigest(t, img).String(), - wantErr: false, - }, { - name: "normal pull, by digest", - ref: mustDigest(t, img).String(), - responseBody: mustRawManifest(t, img), - contentDigest: mustDigest(t, img).String(), - wantErr: false, - }, { - name: "right content-digest, wrong body, by tag", - ref: "latest", - responseBody: []byte("not even json"), - contentDigest: mustDigest(t, img).String(), - wantErr: true, - }, { - name: "right content-digest, wrong body, by digest", - ref: mustDigest(t, img).String(), - responseBody: []byte("not even json"), - contentDigest: mustDigest(t, img).String(), - wantErr: true, - }, { - name: "right body, wrong content-digest, by tag", - ref: "latest", - responseBody: mustRawManifest(t, img), - contentDigest: bogusDigest, - wantErr: true, - }, { - // NB: This succeeds! We don't care what the registry thinks. 
- name: "right body, wrong content-digest, by digest", - ref: mustDigest(t, img).String(), - responseBody: mustRawManifest(t, img), - contentDigest: bogusDigest, - wantErr: false, - }, { - name: "nothing matches anything", - ref: "latest", - responseBody: []byte("everything is wrong with this"), - contentDigest: bogusDigest, - wantErr: true, - }} - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - manifestPath := fmt.Sprintf("/v2/%s/manifests/%s", expectedRepo, tc.ref) - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case manifestPath: - if r.Method != http.MethodGet { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodGet) - } - - w.Header().Set("Docker-Content-Digest", tc.contentDigest) - w.Write(tc.responseBody) - default: - t.Fatalf("Unexpected path: %v", r.URL.Path) - } - })) - defer server.Close() - u, err := url.Parse(server.URL) - if err != nil { - t.Fatalf("url.Parse(%v) = %v", server.URL, err) - } - - ref, err := newReference(u.Host, expectedRepo, tc.ref) - if err != nil { - t.Fatalf("url.Parse(%v, %v, %v) = %v", u.Host, expectedRepo, tc.ref, err) - } - - rmt := remoteImage{ - ref: ref, - client: http.DefaultClient, - } - - if _, err := rmt.RawManifest(); (err != nil) != tc.wantErr { - t.Errorf("RawManifest() wrong error: %v, want %v: %v\n", (err != nil), tc.wantErr, err) - } - }) - } -} - -func TestRawManifestNotFound(t *testing.T) { - expectedRepo := "foo/bar" - manifestPath := fmt.Sprintf("/v2/%s/manifests/latest", expectedRepo) - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case manifestPath: - if r.Method != http.MethodGet { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodGet) - } - w.WriteHeader(http.StatusNotFound) - default: - t.Fatalf("Unexpected path: %v", r.URL.Path) - } - })) - defer server.Close() - u, err := url.Parse(server.URL) - if err != nil { - t.Fatalf("url.Parse(%v) = %v", server.URL, err) - } - - img := remoteImage{ - ref: mustNewTag(t, fmt.Sprintf("%s/%s:latest", u.Host, expectedRepo)), - client: http.DefaultClient, - } - - if _, err := img.RawManifest(); err == nil { - t.Error("RawManifest() = nil; wanted error") - } -} - -func TestRawConfigFileNotFound(t *testing.T) { - img := randomImage(t) - expectedRepo := "foo/bar" - manifestPath := fmt.Sprintf("/v2/%s/manifests/latest", expectedRepo) - configPath := fmt.Sprintf("/v2/%s/blobs/%s", expectedRepo, mustConfigName(t, img)) - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case configPath: - if r.Method != http.MethodGet { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodGet) - } - w.WriteHeader(http.StatusNotFound) - case manifestPath: - if r.Method != http.MethodGet { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodGet) - } - w.Write(mustRawManifest(t, img)) - default: - t.Fatalf("Unexpected path: %v", r.URL.Path) - } - })) - defer server.Close() - u, err := url.Parse(server.URL) - if err != nil { - t.Fatalf("url.Parse(%v) = %v", server.URL, err) - } - - rmt := remoteImage{ - ref: mustNewTag(t, fmt.Sprintf("%s/%s:latest", u.Host, expectedRepo)), - client: http.DefaultClient, - } - - if _, err := rmt.RawConfigFile(); err == nil { - t.Error("RawConfigFile() = nil; wanted error") - } -} - -func TestAcceptHeaders(t *testing.T) { - img := randomImage(t) - expectedRepo := "foo/bar" - manifestPath := fmt.Sprintf("/v2/%s/manifests/latest", 
expectedRepo) - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case manifestPath: - if r.Method != http.MethodGet { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodGet) - } - if got, want := r.Header.Get("Accept"), string(types.DockerManifestSchema2); got != want { - t.Errorf("Accept header; got %v, want %v", got, want) - } - w.Write(mustRawManifest(t, img)) - default: - t.Fatalf("Unexpected path: %v", r.URL.Path) - } - })) - defer server.Close() - u, err := url.Parse(server.URL) - if err != nil { - t.Fatalf("url.Parse(%v) = %v", server.URL, err) - } - - rmt := &remoteImage{ - ref: mustNewTag(t, fmt.Sprintf("%s/%s:latest", u.Host, expectedRepo)), - client: http.DefaultClient, - } - manifest, err := rmt.RawManifest() - if err != nil { - t.Errorf("RawManifest() = %v", err) - } - if got, want := manifest, mustRawManifest(t, img); bytes.Compare(got, want) != 0 { - t.Errorf("RawManifest() = %v, want %v", got, want) - } -} - -func TestImage(t *testing.T) { - img := randomImage(t) - expectedRepo := "foo/bar" - layerDigest := mustManifest(t, img).Layers[0].Digest - layerSize := mustManifest(t, img).Layers[0].Size - configPath := fmt.Sprintf("/v2/%s/blobs/%s", expectedRepo, mustConfigName(t, img)) - manifestPath := fmt.Sprintf("/v2/%s/manifests/latest", expectedRepo) - layerPath := fmt.Sprintf("/v2/%s/blobs/%s", expectedRepo, layerDigest) - manifestReqCount := 0 - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/v2/": - w.WriteHeader(http.StatusOK) - case configPath: - if r.Method != http.MethodGet { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodGet) - } - w.Write(mustRawConfigFile(t, img)) - case manifestPath: - manifestReqCount++ - if r.Method != http.MethodGet { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodGet) - } - w.Write(mustRawManifest(t, img)) - case layerPath: - t.Fatalf("BlobSize should not make any request: %v", r.URL.Path) - default: - t.Fatalf("Unexpected path: %v", r.URL.Path) - } - })) - defer server.Close() - u, err := url.Parse(server.URL) - if err != nil { - t.Fatalf("url.Parse(%v) = %v", server.URL, err) - } - - tag := mustNewTag(t, fmt.Sprintf("%s/%s:latest", u.Host, expectedRepo)) - rmt, err := Image(tag, authn.Anonymous, http.DefaultTransport) - if err != nil { - t.Errorf("Image() = %v", err) - } - - if got, want := mustRawManifest(t, rmt), mustRawManifest(t, img); bytes.Compare(got, want) != 0 { - t.Errorf("RawManifest() = %v, want %v", got, want) - } - if got, want := mustRawConfigFile(t, rmt), mustRawConfigFile(t, img); bytes.Compare(got, want) != 0 { - t.Errorf("RawConfigFile() = %v, want %v", got, want) - } - // Make sure caching the manifest works. - if manifestReqCount != 1 { - t.Errorf("RawManifest made %v requests, expected 1", manifestReqCount) - } - - l, err := rmt.LayerByDigest(layerDigest) - if err != nil { - t.Errorf("LayerByDigest() = %v", err) - } - // BlobSize should not HEAD. 
- size, err := l.Size() - if err != nil { - t.Errorf("BlobSize() = %v", err) - } - if got, want := size, layerSize; want != got { - t.Errorf("BlobSize() = %v want %v", got, want) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list_test.go deleted file mode 100644 index f46ba494..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list_test.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "testing" - - "github.com/google/go-cmp/cmp" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" -) - -func TestList(t *testing.T) { - cases := []struct { - name string - responseBody []byte - wantErr bool - wantTags []string - }{{ - name: "success", - responseBody: []byte(`{"tags":["foo","bar"]}`), - wantErr: false, - wantTags: []string{"foo", "bar"}, - }, { - name: "not json", - responseBody: []byte("notjson"), - wantErr: true, - }} - - repoName := "ubuntu" - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - tagsPath := fmt.Sprintf("/v2/%s/tags/list", repoName) - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/v2/": - w.WriteHeader(http.StatusOK) - case tagsPath: - if r.Method != http.MethodGet { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodGet) - } - - w.Write(tc.responseBody) - default: - t.Fatalf("Unexpected path: %v", r.URL.Path) - } - })) - defer server.Close() - u, err := url.Parse(server.URL) - if err != nil { - t.Fatalf("url.Parse(%v) = %v", server.URL, err) - } - - repo, err := name.NewRepository(fmt.Sprintf("%s/%s", u.Host, repoName), name.WeakValidation) - if err != nil { - t.Fatalf("name.NewRepository(%v) = %v", repoName, err) - } - - tags, err := List(repo, authn.Anonymous, http.DefaultTransport) - if (err != nil) != tc.wantErr { - t.Errorf("List() wrong error: %v, want %v: %v\n", (err != nil), tc.wantErr, err) - } - - if diff := cmp.Diff(tc.wantTags, tags); diff != "" { - t.Errorf("List() wrong tags (-want +got) = %s", diff) - } - }) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/BUILD.bazel deleted file mode 100644 index 5d2ae8e3..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/BUILD.bazel +++ /dev/null @@ -1,38 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "basic.go", - "bearer.go", - "doc.go", - "ping.go", - "scheme.go", - "scope.go", - "transport.go", - ], - importpath = "github.com/google/go-containerregistry/pkg/v1/remote/transport", - visibility = ["//visibility:public"], - 
deps = [ - "//vendor/github.com/google/go-containerregistry/pkg/authn:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/name:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "basic_test.go", - "bearer_test.go", - "ping_test.go", - "scheme_test.go", - "transport_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/google/go-containerregistry/pkg/v1/remote/transport", - deps = [ - "//vendor/github.com/google/go-cmp/cmp:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/authn:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/name:go_default_library", - ], -) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic_test.go deleted file mode 100644 index 3bb854c5..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net/http" - "net/http/httptest" - "net/url" - "strings" - "testing" - - "github.com/google/go-containerregistry/pkg/authn" -) - -func TestBasicTransport(t *testing.T) { - server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - hdr := r.Header.Get("Authorization") - if !strings.HasPrefix(hdr, "Basic ") { - t.Errorf("Header.Get(Authorization); got %v, want Basic prefix", hdr) - } - w.WriteHeader(http.StatusOK) - })) - defer server.Close() - - inner := &http.Transport{ - Proxy: func(req *http.Request) (*url.URL, error) { - return url.Parse(server.URL) - }, - } - - basic := &authn.Basic{Username: "foo", Password: "bar"} - client := http.Client{Transport: &basicTransport{inner: inner, auth: basic, target: "gcr.io"}} - - _, err := client.Get("http://gcr.io/v2/auth") - if err != nil { - t.Errorf("Unexpected error during Get: %v", err) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer_test.go deleted file mode 100644 index 23b1f4f6..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "strings" - "testing" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" -) - -func TestBearerRefresh(t *testing.T) { - expectedToken := "Sup3rDup3rS3cr3tz" - expectedScope := "this-is-your-scope" - expectedService := "my-service.io" - - cases := []struct { - tokenKey string - wantErr bool - }{{ - tokenKey: "token", - wantErr: false, - }, { - tokenKey: "access_token", - wantErr: false, - }, { - tokenKey: "tolkien", - wantErr: true, - }} - - for _, tc := range cases { - t.Run(tc.tokenKey, func(t *testing.T) { - server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - hdr := r.Header.Get("Authorization") - if !strings.HasPrefix(hdr, "Basic ") { - t.Errorf("Header.Get(Authorization); got %v, want Basic prefix", hdr) - } - if got, want := r.FormValue("scope"), expectedScope; got != want { - t.Errorf("FormValue(scope); got %v, want %v", got, want) - } - if got, want := r.FormValue("service"), expectedService; got != want { - t.Errorf("FormValue(service); got %v, want %v", got, want) - } - w.Write([]byte(fmt.Sprintf(`{%q: %q}`, tc.tokenKey, expectedToken))) - })) - defer server.Close() - - basic := &authn.Basic{Username: "foo", Password: "bar"} - registry, err := name.NewRegistry(expectedService, name.WeakValidation) - if err != nil { - t.Errorf("Unexpected error during NewRegistry: %v", err) - } - - bt := &bearerTransport{ - inner: http.DefaultTransport, - basic: basic, - registry: registry, - realm: server.URL, - scopes: []string{expectedScope}, - service: expectedService, - } - - if err := bt.refresh(); (err != nil) != tc.wantErr { - t.Errorf("refresh() = %v", err) - } - }) - } -} - -func TestBearerTransport(t *testing.T) { - expectedToken := "sdkjhfskjdhfkjshdf" - - blobServer := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // We don't expect the blobServer to receive bearer tokens. 
- if got := r.Header.Get("Authorization"); got != "" { - t.Errorf("Header.Get(Authorization); got %v, want empty string", got) - } - w.WriteHeader(http.StatusOK) - })) - defer blobServer.Close() - - server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if got, want := r.Header.Get("Authorization"), "Bearer "+expectedToken; got != want { - t.Errorf("Header.Get(Authorization); got %v, want %v", got, want) - } - - if strings.Contains(r.URL.Path, "blobs") { - http.Redirect(w, r, blobServer.URL, http.StatusFound) - return - } - w.WriteHeader(http.StatusOK) - })) - defer server.Close() - - u, err := url.Parse(server.URL) - if err != nil { - t.Errorf("Unexpected error during url.Parse: %v", err) - } - registry, err := name.NewRegistry(u.Host, name.WeakValidation) - if err != nil { - t.Errorf("Unexpected error during NewRegistry: %v", err) - } - - bearer := &authn.Bearer{Token: expectedToken} - client := http.Client{Transport: &bearerTransport{ - inner: &http.Transport{}, - bearer: bearer, - registry: registry, - }} - - _, err = client.Get(fmt.Sprintf("http://%s/v2/auth", u.Host)) - if err != nil { - t.Errorf("Unexpected error during Get: %v", err) - } - - _, err = client.Get(fmt.Sprintf("http://%s/v2/foo/bar/blobs/blah", u.Host)) - if err != nil { - t.Errorf("Unexpected error during Get: %v", err) - } -} - -// TODO(mattmoor): 401 response prompts a refresh (NYI) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping_test.go deleted file mode 100644 index b122b2b0..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping_test.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package transport - -import ( - "net/http" - "net/http/httptest" - "net/url" - "testing" - - "github.com/google/go-cmp/cmp" - - "github.com/google/go-containerregistry/pkg/name" -) - -var ( - testRegistry, _ = name.NewRegistry("localhost:8080", name.StrictValidation) -) - -func TestChallengeParsing(t *testing.T) { - tests := []struct { - input string - output map[string]string - }{{ - input: `foo="bar"`, - output: map[string]string{ - "foo": "bar", - }, - }, { - input: `foo`, - output: map[string]string{ - "foo": "", - }, - }, { - input: `foo="bar",baz="blah"`, - output: map[string]string{ - "foo": "bar", - "baz": "blah", - }, - }, { - input: `baz="blah", foo="bar"`, - output: map[string]string{ - "foo": "bar", - "baz": "blah", - }, - }, { - input: `realm="https://gcr.io/v2/token", service="gcr.io", scope="repository:foo/bar:pull"`, - output: map[string]string{ - "realm": "https://gcr.io/v2/token", - "service": "gcr.io", - "scope": "repository:foo/bar:pull", - }, - }} - - for _, test := range tests { - params := parseChallenge(test.input) - if diff := cmp.Diff(test.output, params); diff != "" { - t.Errorf("parseChallenge(%s); (-want +got) %s", test.input, diff) - } - } -} - -func TestPingNoChallenge(t *testing.T) { - server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - })) - defer server.Close() - tprt := &http.Transport{ - Proxy: func(req *http.Request) (*url.URL, error) { - return url.Parse(server.URL) - }, - } - - pr, err := ping(testRegistry, tprt) - if err != nil { - t.Errorf("ping() = %v", err) - } - if pr.challenge != anonymous { - t.Errorf("ping(); got %v, want %v", pr.challenge, anonymous) - } -} - -func TestPingBasicChallengeNoParams(t *testing.T) { - server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("WWW-Authenticate", `BASIC`) - http.Error(w, "Unauthorized", http.StatusUnauthorized) - })) - defer server.Close() - tprt := &http.Transport{ - Proxy: func(req *http.Request) (*url.URL, error) { - return url.Parse(server.URL) - }, - } - - pr, err := ping(testRegistry, tprt) - if err != nil { - t.Errorf("ping() = %v", err) - } - if pr.challenge != basic { - t.Errorf("ping(); got %v, want %v", pr.challenge, basic) - } - if got, want := len(pr.parameters), 0; got != want { - t.Errorf("ping(); got %v, want %v", got, want) - } -} - -func TestPingBearerChallengeWithParams(t *testing.T) { - server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("WWW-Authenticate", `Bearer realm="http://auth.foo.io/token`) - http.Error(w, "Unauthorized", http.StatusUnauthorized) - })) - defer server.Close() - tprt := &http.Transport{ - Proxy: func(req *http.Request) (*url.URL, error) { - return url.Parse(server.URL) - }, - } - - pr, err := ping(testRegistry, tprt) - if err != nil { - t.Errorf("ping() = %v", err) - } - if pr.challenge != bearer { - t.Errorf("ping(); got %v, want %v", pr.challenge, bearer) - } - if got, want := len(pr.parameters), 1; got != want { - t.Errorf("ping(); got %v, want %v", got, want) - } -} - -func TestUnsupportedStatus(t *testing.T) { - server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("WWW-Authenticate", `Bearer realm="http://auth.foo.io/token`) - http.Error(w, "Forbidden", http.StatusForbidden) - })) - defer server.Close() - tprt := &http.Transport{ - Proxy: func(req *http.Request) (*url.URL, error) { - return 
url.Parse(server.URL) - }, - } - - pr, err := ping(testRegistry, tprt) - if err == nil { - t.Errorf("ping() = %v", pr) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scheme_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scheme_test.go deleted file mode 100644 index c74db688..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scheme_test.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "testing" - - "github.com/google/go-containerregistry/pkg/name" -) - -func TestSheme(t *testing.T) { - tests := []struct { - domain string - scheme string - }{{ - domain: "foo.svc.local:1234", - scheme: "http", - }, { - domain: "127.0.0.1:1234", - scheme: "http", - }, { - domain: "127.0.0.1", - scheme: "http", - }, { - domain: "localhost:8080", - scheme: "http", - }, { - domain: "gcr.io", - scheme: "https", - }, { - domain: "index.docker.io", - scheme: "https", - }} - - for _, test := range tests { - reg, err := name.NewRegistry(test.domain, name.WeakValidation) - if err != nil { - t.Errorf("NewRegistry(%s) = %v", test.domain, err) - } - if got, want := Scheme(reg), test.scheme; got != want { - t.Errorf("scheme(%v); got %v, want %v", reg, got, want) - } - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport_test.go deleted file mode 100644 index 1dcc756a..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport_test.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package transport - -import ( - "net/http" - "net/http/httptest" - "net/url" - "strings" - "testing" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" -) - -var ( - testReference, _ = name.NewTag("localhost:8080/user/image:latest", name.StrictValidation) -) - -func TestTransportSelectionAnonymous(t *testing.T) { - server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - })) - defer server.Close() - tprt := &http.Transport{ - Proxy: func(req *http.Request) (*url.URL, error) { - return url.Parse(server.URL) - }, - } - - basic := &authn.Basic{Username: "foo", Password: "bar"} - - tp, err := New(testReference.Context().Registry, basic, tprt, []string{testReference.Scope(PullScope)}) - if err != nil { - t.Errorf("New() = %v", err) - } - // We should get back an unmodified transport - if tp != tprt { - t.Errorf("New(); got %v, want %v", tp, tprt) - } -} - -func TestTransportSelectionBasic(t *testing.T) { - server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("WWW-Authenticate", `Basic`) - http.Error(w, "Unauthorized", http.StatusUnauthorized) - })) - defer server.Close() - tprt := &http.Transport{ - Proxy: func(req *http.Request) (*url.URL, error) { - return url.Parse(server.URL) - }, - } - - basic := &authn.Basic{Username: "foo", Password: "bar"} - - tp, err := New(testReference.Context().Registry, basic, tprt, []string{testReference.Scope(PullScope)}) - if err != nil { - t.Errorf("New() = %v", err) - } - if _, ok := tp.(*basicTransport); !ok { - t.Errorf("New(); got %T, want *basicTransport", tp) - } -} - -func TestTransportSelectionBearer(t *testing.T) { - request := 0 - server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - request = request + 1 - switch request { - case 1: - w.Header().Set("WWW-Authenticate", `Bearer realm="http://foo.io"`) - http.Error(w, "Unauthorized", http.StatusUnauthorized) - case 2: - hdr := r.Header.Get("Authorization") - if !strings.HasPrefix(hdr, "Basic ") { - t.Errorf("Header.Get(Authorization); got %v, want Basic prefix", hdr) - } - if got, want := r.FormValue("scope"), testReference.Scope(string(PullScope)); got != want { - t.Errorf("FormValue(scope); got %v, want %v", got, want) - } - // Check that we get the default value (we didn't specify it above) - if got, want := r.FormValue("service"), testReference.RegistryStr(); got != want { - t.Errorf("FormValue(service); got %v, want %v", got, want) - } - w.Write([]byte(`{"token": "dfskdjhfkhsjdhfkjhsdf"}`)) - } - })) - defer server.Close() - tprt := &http.Transport{ - Proxy: func(req *http.Request) (*url.URL, error) { - return url.Parse(server.URL) - }, - } - - basic := &authn.Basic{Username: "foo", Password: "bar"} - tp, err := New(testReference.Context().Registry, basic, tprt, []string{testReference.Scope(PullScope)}) - if err != nil { - t.Errorf("New() = %v", err) - } - if _, ok := tp.(*bearerTransport); !ok { - t.Errorf("New(); got %T, want *bearerTransport", tp) - } -} - -func TestTransportSelectionBearerMissingRealm(t *testing.T) { - server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("WWW-Authenticate", `Bearer service="gcr.io"`) - http.Error(w, "Unauthorized", http.StatusUnauthorized) - })) - defer server.Close() - tprt := &http.Transport{ - Proxy: func(req *http.Request) (*url.URL, error) { - return 
url.Parse(server.URL) - }, - } - - basic := &authn.Basic{Username: "foo", Password: "bar"} - tp, err := New(testReference.Context().Registry, basic, tprt, []string{testReference.Scope(PullScope)}) - if err == nil || !strings.Contains(err.Error(), "missing realm") { - t.Errorf("New() = %v, %v", tp, err) - } -} - -func TestTransportSelectionBearerAuthError(t *testing.T) { - request := 0 - server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - request = request + 1 - switch request { - case 1: - w.Header().Set("WWW-Authenticate", `Bearer realm="http://foo.io"`) - http.Error(w, "Unauthorized", http.StatusUnauthorized) - case 2: - http.Error(w, "Oops", http.StatusInternalServerError) - } - })) - defer server.Close() - tprt := &http.Transport{ - Proxy: func(req *http.Request) (*url.URL, error) { - return url.Parse(server.URL) - }, - } - - basic := &authn.Basic{Username: "foo", Password: "bar"} - tp, err := New(testReference.Context().Registry, basic, tprt, []string{testReference.Scope(PullScope)}) - if err == nil { - t.Errorf("New() = %v", tp) - } -} - -func TestTransportSelectionUnrecognizedChallenge(t *testing.T) { - server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("WWW-Authenticate", `Unrecognized`) - http.Error(w, "Unauthorized", http.StatusUnauthorized) - })) - defer server.Close() - tprt := &http.Transport{ - Proxy: func(req *http.Request) (*url.URL, error) { - return url.Parse(server.URL) - }, - } - - basic := &authn.Basic{Username: "foo", Password: "bar"} - tp, err := New(testReference.Context().Registry, basic, tprt, []string{testReference.Scope(PullScope)}) - if err == nil || !strings.Contains(err.Error(), "challenge") { - t.Errorf("New() = %v, %v", tp, err) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write_test.go deleted file mode 100644 index 4849c8c2..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write_test.go +++ /dev/null @@ -1,555 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package remote - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "testing" - - "github.com/google/go-cmp/cmp" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/random" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" -) - -func mustNewTag(t *testing.T, s string) name.Tag { - tag, err := name.NewTag(s, name.WeakValidation) - if err != nil { - t.Fatalf("NewTag(%v) = %v", s, err) - } - return tag -} - -func TestUrl(t *testing.T) { - tests := []struct { - tag string - path string - url string - }{{ - tag: "gcr.io/foo/bar:latest", - path: "/v2/foo/bar/manifests/latest", - url: "https://gcr.io/v2/foo/bar/manifests/latest", - }, { - tag: "localhost:8080/foo/bar:baz", - path: "/v2/foo/bar/blobs/upload", - url: "http://localhost:8080/v2/foo/bar/blobs/upload", - }} - - for _, test := range tests { - w := &writer{ - ref: mustNewTag(t, test.tag), - } - if got, want := w.url(test.path), test.url; got.String() != want { - t.Errorf("url(%v) = %v, want %v", test.path, got.String(), want) - } - } -} - -func TestNextLocation(t *testing.T) { - tests := []struct { - location string - url string - }{{ - location: "https://gcr.io/v2/foo/bar/blobs/uploads/1234567?baz=blah", - url: "https://gcr.io/v2/foo/bar/blobs/uploads/1234567?baz=blah", - }, { - location: "/v2/foo/bar/blobs/uploads/1234567?baz=blah", - url: "https://gcr.io/v2/foo/bar/blobs/uploads/1234567?baz=blah", - }} - - ref := mustNewTag(t, "gcr.io/foo/bar:latest") - w := &writer{ - ref: ref, - } - - for _, test := range tests { - resp := &http.Response{ - Header: map[string][]string{ - "Location": []string{test.location}, - }, - Request: &http.Request{ - URL: &url.URL{ - Scheme: transport.Scheme(ref.Registry), - Host: ref.RegistryStr(), - }, - }, - } - - got, err := w.nextLocation(resp) - if err != nil { - t.Errorf("nextLocation(%v) = %v", resp, err) - } - want := test.url - if got != want { - t.Errorf("nextLocation(%v) = %v, want %v", resp, got, want) - } - } -} - -type closer interface { - Close() -} - -func setupImage(t *testing.T) v1.Image { - rnd, err := random.Image(1024, 1) - if err != nil { - t.Fatalf("random.Image() = %v", err) - } - return rnd -} - -func mustConfigName(t *testing.T, img v1.Image) v1.Hash { - h, err := img.ConfigName() - if err != nil { - t.Fatalf("ConfigName() = %v", err) - } - return h -} - -func setupWriter(repo string, img v1.Image, handler http.HandlerFunc) (*writer, closer, error) { - server := httptest.NewServer(handler) - u, err := url.Parse(server.URL) - if err != nil { - server.Close() - return nil, nil, err - } - tag, err := name.NewTag(fmt.Sprintf("%s/%s:latest", u.Host, repo), name.WeakValidation) - if err != nil { - server.Close() - return nil, nil, err - } - - return &writer{ - ref: tag, - img: img, - client: http.DefaultClient, - }, server, nil -} - -func TestInitiateUploadNoMountsExists(t *testing.T) { - img := setupImage(t) - h := mustConfigName(t, img) - expectedRepo := "foo/bar" - expectedPath := fmt.Sprintf("/v2/%s/blobs/uploads/", expectedRepo) - expectedQuery := url.Values{ - "mount": []string{h.String()}, - }.Encode() - - w, closer, err := setupWriter(expectedRepo, img, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodPost) - } - if r.URL.Path != expectedPath { - 
t.Errorf("URL; got %v, want %v", r.URL.Path, expectedPath) - } - if r.URL.RawQuery != expectedQuery { - t.Errorf("RawQuery; got %v, want %v", r.URL.RawQuery, expectedQuery) - } - http.Error(w, "Mounted", http.StatusCreated) - })) - if err != nil { - t.Fatalf("setupWriter() = %v", err) - } - defer closer.Close() - - _, mounted, err := w.initiateUpload(h) - if err != nil { - t.Errorf("intiateUpload() = %v", err) - } - if !mounted { - t.Error("initiateUpload() = !mounted, want mounted") - } -} - -func TestInitiateUploadNoMountsInitiated(t *testing.T) { - img := setupImage(t) - h := mustConfigName(t, img) - expectedRepo := "baz/blah" - expectedPath := fmt.Sprintf("/v2/%s/blobs/uploads/", expectedRepo) - expectedQuery := url.Values{ - "mount": []string{h.String()}, - }.Encode() - expectedLocation := "https://somewhere.io/upload?foo=bar" - - w, closer, err := setupWriter(expectedRepo, img, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodPost) - } - if r.URL.Path != expectedPath { - t.Errorf("URL; got %v, want %v", r.URL.Path, expectedPath) - } - if r.URL.RawQuery != expectedQuery { - t.Errorf("RawQuery; got %v, want %v", r.URL.RawQuery, expectedQuery) - } - w.Header().Set("Location", expectedLocation) - http.Error(w, "Initiated", http.StatusAccepted) - })) - if err != nil { - t.Fatalf("setupWriter() = %v", err) - } - defer closer.Close() - - location, mounted, err := w.initiateUpload(h) - if err != nil { - t.Errorf("intiateUpload() = %v", err) - } - if mounted { - t.Error("initiateUpload() = mounted, want !mounted") - } - if location != expectedLocation { - t.Errorf("initiateUpload(); got %v, want %v", location, expectedLocation) - } -} - -func TestInitiateUploadNoMountsBadStatus(t *testing.T) { - img := setupImage(t) - h := mustConfigName(t, img) - expectedRepo := "ugh/another" - expectedPath := fmt.Sprintf("/v2/%s/blobs/uploads/", expectedRepo) - expectedQuery := url.Values{ - "mount": []string{h.String()}, - }.Encode() - - w, closer, err := setupWriter(expectedRepo, img, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodPost) - } - if r.URL.Path != expectedPath { - t.Errorf("URL; got %v, want %v", r.URL.Path, expectedPath) - } - if r.URL.RawQuery != expectedQuery { - t.Errorf("RawQuery; got %v, want %v", r.URL.RawQuery, expectedQuery) - } - http.Error(w, "Unknown", http.StatusNoContent) - })) - if err != nil { - t.Fatalf("setupWriter() = %v", err) - } - defer closer.Close() - - location, mounted, err := w.initiateUpload(h) - if err == nil { - t.Errorf("intiateUpload() = %v, %v; wanted error", location, mounted) - } -} - -func TestInitiateUploadMountsWithMount(t *testing.T) { - img := setupImage(t) - h := mustConfigName(t, img) - expectedMountRepo := "a/different/repo" - expectedRepo := "yet/again" - expectedPath := fmt.Sprintf("/v2/%s/blobs/uploads/", expectedRepo) - expectedQuery := url.Values{ - "mount": []string{h.String()}, - "from": []string{expectedMountRepo}, - }.Encode() - - img = &mountableImage{ - Image: img, - Repository: mustNewTag(t, fmt.Sprintf("gcr.io/%s", expectedMountRepo)).Repository, - } - - w, closer, err := setupWriter(expectedRepo, img, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodPost) - } - if r.URL.Path != expectedPath { - t.Errorf("URL; got 
%v, want %v", r.URL.Path, expectedPath) - } - if r.URL.RawQuery != expectedQuery { - t.Errorf("RawQuery; got %v, want %v", r.URL.RawQuery, expectedQuery) - } - http.Error(w, "Mounted", http.StatusCreated) - })) - if err != nil { - t.Fatalf("setupWriter() = %v", err) - } - defer closer.Close() - - _, mounted, err := w.initiateUpload(h) - if err != nil { - t.Errorf("intiateUpload() = %v", err) - } - if !mounted { - t.Error("initiateUpload() = !mounted, want mounted") - } -} - -func TestStreamBlob(t *testing.T) { - img := setupImage(t) - h := mustConfigName(t, img) - expectedPath := "/vWhatever/I/decide" - expectedCommitLocation := "https://commit.io/v12/blob" - - w, closer, err := setupWriter("what/ever", img, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPatch { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodPatch) - } - if r.URL.Path != expectedPath { - t.Errorf("URL; got %v, want %v", r.URL.Path, expectedPath) - } - got, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Errorf("ReadAll(Body) = %v", err) - } - want, err := img.RawConfigFile() - if err != nil { - t.Errorf("RawConfigFile() = %v", err) - } - if bytes.Compare(got, want) != 0 { - t.Errorf("bytes.Compare(); got %v, want %v", got, want) - } - w.Header().Set("Location", expectedCommitLocation) - http.Error(w, "Created", http.StatusCreated) - })) - if err != nil { - t.Fatalf("setupWriter() = %v", err) - } - defer closer.Close() - - streamLocation := w.url(expectedPath) - - commitLocation, err := w.streamBlob(h, streamLocation.String()) - if err != nil { - t.Errorf("streamBlob() = %v", err) - } - if commitLocation != expectedCommitLocation { - t.Errorf("streamBlob(); got %v, want %v", commitLocation, expectedCommitLocation) - } -} - -func TestCommitBlob(t *testing.T) { - img := setupImage(t) - h := mustConfigName(t, img) - expectedPath := "/no/commitment/issues" - expectedQuery := url.Values{ - "digest": []string{h.String()}, - }.Encode() - - w, closer, err := setupWriter("what/ever", img, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPut { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodPut) - } - if r.URL.Path != expectedPath { - t.Errorf("URL; got %v, want %v", r.URL.Path, expectedPath) - } - if r.URL.RawQuery != expectedQuery { - t.Errorf("RawQuery; got %v, want %v", r.URL.RawQuery, expectedQuery) - } - http.Error(w, "Created", http.StatusCreated) - })) - if err != nil { - t.Fatalf("setupWriter() = %v", err) - } - defer closer.Close() - - commitLocation := w.url(expectedPath) - - if err := w.commitBlob(h, commitLocation.String()); err != nil { - t.Errorf("commitBlob() = %v", err) - } -} - -func TestUploadOne(t *testing.T) { - img := setupImage(t) - h := mustConfigName(t, img) - expectedRepo := "baz/blah" - initiatePath := fmt.Sprintf("/v2/%s/blobs/uploads/", expectedRepo) - streamPath := "/path/to/upload" - commitPath := "/path/to/commit" - - w, closer, err := setupWriter(expectedRepo, img, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case initiatePath: - if r.Method != http.MethodPost { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodPost) - } - w.Header().Set("Location", streamPath) - http.Error(w, "Initiated", http.StatusAccepted) - case streamPath: - if r.Method != http.MethodPatch { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodPatch) - } - got, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Errorf("ReadAll(Body) = %v", err) - } - want, 
err := img.RawConfigFile() - if err != nil { - t.Errorf("RawConfigFile() = %v", err) - } - if bytes.Compare(got, want) != 0 { - t.Errorf("bytes.Compare(); got %v, want %v", got, want) - } - w.Header().Set("Location", commitPath) - http.Error(w, "Initiated", http.StatusAccepted) - case commitPath: - if r.Method != http.MethodPut { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodPut) - } - http.Error(w, "Created", http.StatusCreated) - default: - t.Fatalf("Unexpected path: %v", r.URL.Path) - } - })) - if err != nil { - t.Fatalf("setupWriter() = %v", err) - } - defer closer.Close() - - if err := w.uploadOne(h); err != nil { - t.Errorf("uploadOne() = %v", err) - } -} - -func TestCommitImage(t *testing.T) { - img := setupImage(t) - - expectedRepo := "foo/bar" - expectedPath := fmt.Sprintf("/v2/%s/manifests/latest", expectedRepo) - - w, closer, err := setupWriter(expectedRepo, img, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPut { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodPut) - } - if r.URL.Path != expectedPath { - t.Errorf("URL; got %v, want %v", r.URL.Path, expectedPath) - } - got, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Errorf("ReadAll(Body) = %v", err) - } - want, err := img.RawManifest() - if err != nil { - t.Errorf("RawManifest() = %v", err) - } - if bytes.Compare(got, want) != 0 { - t.Errorf("bytes.Compare(); got %v, want %v", got, want) - } - mt, err := img.MediaType() - if err != nil { - t.Errorf("MediaType() = %v", err) - } - if got, want := r.Header.Get("Content-Type"), string(mt); got != want { - t.Errorf("Header; got %v, want %v", got, want) - } - http.Error(w, "Created", http.StatusCreated) - })) - if err != nil { - t.Fatalf("setupWriter() = %v", err) - } - defer closer.Close() - - if err := w.commitImage(); err != nil { - t.Errorf("commitBlob() = %v", err) - } -} - -func TestWrite(t *testing.T) { - img := setupImage(t) - expectedRepo := "write/time" - initiatePath := fmt.Sprintf("/v2/%s/blobs/uploads/", expectedRepo) - manifestPath := fmt.Sprintf("/v2/%s/manifests/latest", expectedRepo) - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/v2/": - w.WriteHeader(http.StatusOK) - case initiatePath: - if r.Method != http.MethodPost { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodPost) - } - http.Error(w, "Mounted", http.StatusCreated) - case manifestPath: - if r.Method != http.MethodPut { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodPut) - } - http.Error(w, "Created", http.StatusCreated) - default: - t.Fatalf("Unexpected path: %v", r.URL.Path) - } - })) - defer server.Close() - u, err := url.Parse(server.URL) - if err != nil { - t.Fatalf("url.Parse(%v) = %v", server.URL, err) - } - tag, err := name.NewTag(fmt.Sprintf("%s/%s:latest", u.Host, expectedRepo), name.WeakValidation) - if err != nil { - t.Fatalf("NewTag() = %v", err) - } - - if err := Write(tag, img, authn.Anonymous, http.DefaultTransport, WriteOptions{}); err != nil { - t.Errorf("Write() = %v", err) - } -} - -func TestWriteWithErrors(t *testing.T) { - img := setupImage(t) - expectedRepo := "write/time" - initiatePath := fmt.Sprintf("/v2/%s/blobs/uploads/", expectedRepo) - - expectedError := &Error{ - Errors: []Diagnostic{{ - Code: NameInvalidErrorCode, - Message: "some explanation of how things were messed up.", - }}, - } - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path 
{ - case "/v2/": - w.WriteHeader(http.StatusOK) - case initiatePath: - if r.Method != http.MethodPost { - t.Errorf("Method; got %v, want %v", r.Method, http.MethodPost) - } - b, err := json.Marshal(expectedError) - if err != nil { - t.Fatalf("json.Marshal() = %v", err) - } - w.WriteHeader(http.StatusBadRequest) - w.Write(b) - default: - t.Fatalf("Unexpected path: %v", r.URL.Path) - } - })) - defer server.Close() - u, err := url.Parse(server.URL) - if err != nil { - t.Fatalf("url.Parse(%v) = %v", server.URL, err) - } - tag, err := name.NewTag(fmt.Sprintf("%s/%s:latest", u.Host, expectedRepo), name.WeakValidation) - if err != nil { - t.Fatalf("NewTag() = %v", err) - } - - if err := Write(tag, img, authn.Anonymous, http.DefaultTransport, WriteOptions{}); err == nil { - t.Error("Write() = nil; wanted error") - } else if se, ok := err.(*Error); !ok { - t.Errorf("Write() = %T; wanted *remote.Error", se) - } else if diff := cmp.Diff(expectedError, se); diff != "" { - t.Errorf("Write(); (-want +got) = %s", diff) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/BUILD.bazel deleted file mode 100644 index 906168eb..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/BUILD.bazel +++ /dev/null @@ -1,87 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "image.go", - "layer.go", - "write.go", - ], - importpath = "github.com/google/go-containerregistry/pkg/v1/tarball", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/google/go-containerregistry/pkg/name:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/partial:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/types:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/v1util:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "image_test.go", - "layer_test.go", - "write_test.go", - ], - data = glob(["testdata/**"]) + [ - ":test_image_1.tar", # keep - ":test_image_2.tar", # keep - ":test_bundle.tar", # keep - ":content.tar", # keep - ], - embed = [":go_default_library"], - importpath = "github.com/google/go-containerregistry/pkg/v1/tarball", - deps = [ - "//vendor/github.com/google/go-cmp/cmp:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/name:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1:go_default_library", - "//vendor/github.com/google/go-containerregistry/pkg/v1/random:go_default_library", - ], -) - -load( - "@io_bazel_rules_docker//container:container.bzl", - "container_image", - "container_bundle", -) - -container_image( - name = "test_image_1", - files = [ - "testdata/bar", - "testdata/foo", - ], - visibility = ["//visibility:public"], -) - -container_image( - name = "test_image_2", - files = [ - "testdata/bat/bat", - "testdata/baz", - ], -) - -container_bundle( - name = "test_bundle", - images = { - "test_image_1": ":test_image_1", - "test_image_2": ":test_image_2", - }, -) - -load( - "@bazel_tools//tools/build_defs/pkg:pkg.bzl", - "pkg_tar", -) - -pkg_tar( - name = "content", - srcs = [ - "testdata/bat/bat", - "testdata/baz", - ], -) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image_test.go 
b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image_test.go deleted file mode 100644 index df623ba0..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image_test.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tarball - -import ( - "testing" - - "github.com/google/go-containerregistry/pkg/name" -) - -func TestManifestAndConfig(t *testing.T) { - img, err := ImageFromPath("test_image_1.tar", nil) - if err != nil { - t.Fatalf("Error loading image: %v", err) - } - manifest, err := img.Manifest() - if err != nil { - t.Fatalf("Error loading manifest: %v", err) - } - if len(manifest.Layers) != 1 { - t.Fatalf("layers should be 1, got %d", len(manifest.Layers)) - } - - config, err := img.ConfigFile() - if err != nil { - t.Fatalf("Error loading config file: %v", err) - } - if len(config.History) != 1 { - t.Fatalf("history length should be 1, got %d", len(config.History)) - } -} - -func TestNoManifest(t *testing.T) { - img, err := ImageFromPath("testdata/no_manifest.tar", nil) - if err == nil { - t.Fatalf("Error expected loading image: %v", img) - } -} - -func TestBundleSingle(t *testing.T) { - img, err := ImageFromPath("test_bundle.tar", nil) - if err == nil { - t.Fatalf("Error expected loading image: %v", img) - } -} - -func TestBundleMultiple(t *testing.T) { - for _, imgName := range []string{ - "test_image_1", - "test_image_2", - "test_image_1:latest", - "test_image_2:latest", - "index.docker.io/library/test_image_1:latest", - } { - t.Run(imgName, func(t *testing.T) { - tag, err := name.NewTag(imgName, name.WeakValidation) - if err != nil { - t.Fatalf("Error creating tag: %v", err) - } - img, err := ImageFromPath("test_bundle.tar", &tag) - if err != nil { - t.Fatalf("Error loading image: %v", err) - } - if _, err := img.Manifest(); err != nil { - t.Fatalf("Unexpected error loading manifest: %v", err) - } - }) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer_test.go deleted file mode 100644 index 5dc49529..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer_test.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tarball - -import ( - "bytes" - "compress/gzip" - "io" - "io/ioutil" - "os" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-containerregistry/pkg/v1" -) - -func TestLayerFromFile(t *testing.T) { - setupFixtures(t) - - tarLayer, err := LayerFromFile("content.tar") - if err != nil { - t.Fatalf("Unable to create layer from tar file: %v", err) - } - - tarGzLayer, err := LayerFromFile("gzip_content.tgz") - if err != nil { - t.Fatalf("Unable to create layer from compressed tar file: %v", err) - } - - assertDigestsAreEqual(t, tarLayer, tarGzLayer) - assertDiffIDsAreEqual(t, tarLayer, tarGzLayer) - assertCompressedStreamsAreEqual(t, tarLayer, tarGzLayer) - assertUncompressedStreamsAreEqual(t, tarLayer, tarGzLayer) - assertSizesAreEqual(t, tarLayer, tarGzLayer) -} - -func TestLayerFromReader(t *testing.T) { - setupFixtures(t) - - ucBytes, err := ioutil.ReadFile("content.tar") - if err != nil { - t.Fatalf("Unable to read tar file: %v", err) - } - ucOpener := func() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewReader(ucBytes)), nil - } - tarLayer, err := LayerFromOpener(ucOpener) - if err != nil { - t.Fatalf("Unable to create layer from tar file: %v", err) - } - - gzBytes, err := ioutil.ReadFile("gzip_content.tgz") - if err != nil { - t.Fatalf("Unable to read tar file: %v", err) - } - gzOpener := func() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewReader(gzBytes)), nil - } - tarGzLayer, err := LayerFromOpener(gzOpener) - if err != nil { - t.Fatalf("Unable to create layer from tar file: %v", err) - } - - assertDigestsAreEqual(t, tarLayer, tarGzLayer) - assertDiffIDsAreEqual(t, tarLayer, tarGzLayer) - assertCompressedStreamsAreEqual(t, tarLayer, tarGzLayer) - assertUncompressedStreamsAreEqual(t, tarLayer, tarGzLayer) - assertSizesAreEqual(t, tarLayer, tarGzLayer) -} - -func assertDigestsAreEqual(t *testing.T, a, b v1.Layer) { - t.Helper() - - sa, err := a.Digest() - if err != nil { - t.Fatalf("Unable to fetch digest for layer: %v", err) - } - - sb, err := b.Digest() - if err != nil { - t.Fatalf("Unable to fetch digest for layer: %v", err) - } - - if sa != sb { - t.Fatalf("Digest of each layer is different - %v != %v", sa, sb) - } -} - -func assertDiffIDsAreEqual(t *testing.T, a, b v1.Layer) { - t.Helper() - - sa, err := a.DiffID() - if err != nil { - t.Fatalf("Unable to fetch diffID for layer: %v", err) - } - - sb, err := b.DiffID() - if err != nil { - t.Fatalf("Unable to fetch diffID for layer: %v", err) - } - - if sa != sb { - t.Fatalf("diffID of each layer is different - %v != %v", sa, sb) - } -} - -func assertCompressedStreamsAreEqual(t *testing.T, a, b v1.Layer) { - t.Helper() - - sa, err := a.Compressed() - if err != nil { - t.Fatalf("Unable to fetch compressed for layer: %v", err) - } - - saBytes, err := ioutil.ReadAll(sa) - if err != nil { - t.Fatalf("Unable to read bytes for layer: %v", err) - } - - sb, err := b.Compressed() - if err != nil { - t.Fatalf("Unable to fetch compressed for layer: %v", err) - } - - sbBytes, err := ioutil.ReadAll(sb) - if err != nil { - t.Fatalf("Unable to read bytes for layer: %v", err) - } - - if diff := cmp.Diff(saBytes, sbBytes); diff != "" { - t.Fatalf("Compressed streams were different: %v", diff) - } -} - -func assertUncompressedStreamsAreEqual(t *testing.T, a, b v1.Layer) { - t.Helper() - - sa, err := a.Uncompressed() - if err != nil { - t.Fatalf("Unable to fetch uncompressed for layer: %v", err) - } - - saBytes, err := ioutil.ReadAll(sa) - if err != nil { - t.Fatalf("Unable to read bytes for 
layer: %v", err) - } - - sb, err := b.Uncompressed() - if err != nil { - t.Fatalf("Unable to fetch uncompressed for layer: %v", err) - } - - sbBytes, err := ioutil.ReadAll(sb) - if err != nil { - t.Fatalf("Unable to read bytes for layer: %v", err) - } - - if diff := cmp.Diff(saBytes, sbBytes); diff != "" { - t.Fatalf("Uncompressed streams were different: %v", diff) - } -} - -func assertSizesAreEqual(t *testing.T, a, b v1.Layer) { - t.Helper() - - sa, err := a.Size() - if err != nil { - t.Fatalf("Unable to fetch size for layer: %v", err) - } - - sb, err := b.Size() - if err != nil { - t.Fatalf("Unable to fetch size for layer: %v", err) - } - - if sa != sb { - t.Fatalf("Size of each layer is different - %d != %d", sa, sb) - } -} - -// Compression settings matter in order for the digest, size, -// compressed assertions to pass -// -// Since our v1util.GzipReadCloser uses gzip.BestSpeed -// we need our fixture to use the same - bazel's pkg_tar doesn't -// seem to let you control compression settings -func setupFixtures(t *testing.T) { - t.Helper() - - in, err := os.Open("content.tar") - if err != nil { - t.Errorf("Error setting up fixtures: %v", err) - } - - defer in.Close() - - out, err := os.Create("gzip_content.tgz") - if err != nil { - t.Errorf("Error setting up fixtures: %v", err) - } - - defer out.Close() - - gw, _ := gzip.NewWriterLevel(out, gzip.BestSpeed) - defer gw.Close() - - _, err = io.Copy(gw, in) - if err != nil { - t.Errorf("Error setting up fixtures: %v", err) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write_test.go deleted file mode 100644 index 2465a308..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write_test.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tarball - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/google/go-cmp/cmp" - - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/random" -) - -func TestWrite(t *testing.T) { - // Make a tempfile for tarball writes. - fp, err := ioutil.TempFile("", "") - if err != nil { - t.Fatalf("Error creating temp file.") - } - t.Log(fp.Name()) - defer fp.Close() - defer os.Remove(fp.Name()) - - // Make a random image - randImage, err := random.Image(256, 8) - if err != nil { - t.Fatalf("Error creating random image.") - } - tag, err := name.NewTag("gcr.io/foo/bar:latest", name.StrictValidation) - if err != nil { - t.Fatalf("Error creating test tag.") - } - if err := WriteToFile(fp.Name(), tag, randImage, nil); err != nil { - t.Fatalf("Unexpected error writing tarball: %v", err) - } - - // Make sure the image is valid and can be loaded. - // Load it both by nil and by its name. 
- for _, it := range []*name.Tag{nil, &tag} { - tarImage, err := ImageFromPath(fp.Name(), it) - if err != nil { - t.Fatalf("Unexpected error reading tarball: %v", err) - } - - tarManifest, err := tarImage.Manifest() - if err != nil { - t.Fatalf("Unexpected error reading tarball: %v", err) - } - randManifest, err := randImage.Manifest() - if err != nil { - t.Fatalf("Unexpected error reading tarball: %v", err) - } - - if diff := cmp.Diff(randManifest, tarManifest); diff != "" { - t.Errorf("Manifests not equal. (-rand +tar) %s", diff) - } - - assertImageLayersMatchManifestLayers(t, tarImage) - assertLayersAreIdentical(t, randImage, tarImage) - } - - // Try loading a different tag, it should error. - fakeTag, err := name.NewTag("gcr.io/notthistag:latest", name.StrictValidation) - if err != nil { - t.Fatalf("Error generating tag: %v", err) - } - if _, err := ImageFromPath(fp.Name(), &fakeTag); err == nil { - t.Errorf("Expected error loading tag %v from image", fakeTag) - } -} - -func assertImageLayersMatchManifestLayers(t *testing.T, i v1.Image) { - t.Helper() - - layers, err := i.Layers() - if err != nil { - t.Fatalf("error getting layers: %v", err) - } - - digestsFromImage := make([]v1.Hash, len(layers)) - - for i, layer := range layers { - digest, err := layer.Digest() - if err != nil { - t.Fatalf("error getting digests: %v", err) - } - digestsFromImage[i] = digest - } - - m, err := i.Manifest() - if err != nil { - t.Fatalf("error getting layers to compare: %v", err) - } - - digestsFromManifest := make([]v1.Hash, 0, len(m.Layers)) - for _, layer := range m.Layers { - digestsFromManifest = append(digestsFromManifest, layer.Digest) - } - - if diff := cmp.Diff(digestsFromImage, digestsFromManifest); diff != "" { - t.Fatalf("image.Layers() are not in the same order as "+ - "the image.Manifest().Layers (-image +manifest) %s", diff) - } -} - -func assertLayersAreIdentical(t *testing.T, a, b v1.Image) { - t.Helper() - - aLayers, err := a.Layers() - if err != nil { - t.Fatalf("error getting layers to compare: %v", err) - } - - bLayers, err := b.Layers() - if err != nil { - t.Fatalf("error getting layers to compare: %v", err) - } - - if diff := cmp.Diff(getDigests(t, aLayers), getDigests(t, bLayers)); diff != "" { - t.Fatalf("layers digests are not identical (-rand +tar) %s", diff) - } - - if diff := cmp.Diff(getDiffIDs(t, aLayers), getDiffIDs(t, bLayers)); diff != "" { - t.Fatalf("layers digests are not identical (-rand +tar) %s", diff) - } -} - -func getDigests(t *testing.T, layers []v1.Layer) []v1.Hash { - t.Helper() - - digests := make([]v1.Hash, 0, len(layers)) - for _, layer := range layers { - digest, err := layer.Digest() - if err != nil { - t.Fatalf("error getting digests: %s", err) - } - digests = append(digests, digest) - } - - return digests -} - -func getDiffIDs(t *testing.T, layers []v1.Layer) []v1.Hash { - t.Helper() - - diffIDs := make([]v1.Hash, 0, len(layers)) - for _, layer := range layers { - diffID, err := layer.DiffID() - if err != nil { - t.Fatalf("error getting diffID: %s", err) - } - diffIDs = append(diffIDs, diffID) - } - - return diffIDs -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/types/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/v1/types/BUILD.bazel deleted file mode 100644 index 1980da59..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/types/BUILD.bazel +++ /dev/null @@ -1,8 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["types.go"], - 
importpath = "github.com/google/go-containerregistry/pkg/v1/types", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/BUILD.bazel deleted file mode 100644 index a9529b18..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "and_closer.go", - "nop.go", - "verify.go", - "zip.go", - ], - importpath = "github.com/google/go-containerregistry/pkg/v1/v1util", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/google/go-containerregistry/pkg/v1:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = [ - "and_closer_test.go", - "verify_test.go", - "zip_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/google/go-containerregistry/pkg/v1/v1util", - deps = ["//vendor/github.com/google/go-containerregistry/pkg/v1:go_default_library"], -) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/and_closer_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/and_closer_test.go deleted file mode 100644 index 7e42f95b..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/and_closer_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v1util - -import ( - "bytes" - "io/ioutil" - "testing" -) - -func TestRead(t *testing.T) { - want := "asdf" - r := bytes.NewBufferString(want) - called := false - - rac := &readAndCloser{ - Reader: r, - CloseFunc: func() error { - called = true - return nil - }, - } - - data, err := ioutil.ReadAll(rac) - if err != nil { - t.Errorf("ReadAll(rac) = %v", err) - } - if got := string(data); got != want { - t.Errorf("ReadAll(rac); got %q, want %q", got, want) - } - - if called { - t.Error("called before Close(); got true, wanted false") - } - if err := rac.Close(); err != nil { - t.Errorf("Close() = %v", err) - } - if !called { - t.Error("called after Close(); got false, wanted true") - } -} - -func TestWrite(t *testing.T) { - w := bytes.NewBuffer([]byte{}) - called := false - - wac := &writeAndCloser{ - Writer: w, - CloseFunc: func() error { - called = true - return nil - }, - } - - want := "asdf" - if _, err := wac.Write([]byte(want)); err != nil { - t.Errorf("Write(%q); = %v", want, err) - } - - if called { - t.Error("called before Close(); got true, wanted false") - } - if err := wac.Close(); err != nil { - t.Errorf("Close() = %v", err) - } - if !called { - t.Error("called after Close(); got false, wanted true") - } - - if got := w.String(); got != want { - t.Errorf("w.String(); got %q, want %q", got, want) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/verify_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/verify_test.go deleted file mode 100644 index 2d96a41e..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/verify_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1util - -import ( - "bytes" - "io/ioutil" - "strings" - "testing" - - "github.com/google/go-containerregistry/pkg/v1" -) - -func mustHash(s string, t *testing.T) v1.Hash { - h, _, err := v1.SHA256(strings.NewReader(s)) - if err != nil { - t.Fatalf("SHA256(%s) = %v", s, err) - } - return h -} - -func TestVerificationFailure(t *testing.T) { - want := "This is the input string." - buf := bytes.NewBufferString(want) - - verified, err := VerifyReadCloser(NopReadCloser(buf), mustHash("not the same", t)) - if err != nil { - t.Fatalf("VerifyReadCloser() = %v", err) - } - if b, err := ioutil.ReadAll(verified); err == nil { - t.Errorf("ReadAll() = %q; want verification error", string(b)) - } -} - -func TestVerification(t *testing.T) { - want := "This is the input string." 
- buf := bytes.NewBufferString(want) - - verified, err := VerifyReadCloser(NopReadCloser(buf), mustHash(want, t)) - if err != nil { - t.Fatalf("VerifyReadCloser() = %v", err) - } - if _, err := ioutil.ReadAll(verified); err != nil { - t.Errorf("ReadAll() = %v", err) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/zip_test.go b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/zip_test.go deleted file mode 100644 index 0992dd52..00000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/zip_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1util - -import ( - "bytes" - "io/ioutil" - "testing" -) - -func TestReader(t *testing.T) { - want := "This is the input string." - buf := bytes.NewBufferString(want) - zipped, err := GzipReadCloser(NopReadCloser(buf)) - if err != nil { - t.Errorf("GzipReadCloser() = %v", err) - } - unzipped, err := GunzipReadCloser(zipped) - if err != nil { - t.Errorf("GunzipReadCloser() = %v", err) - } - - b, err := ioutil.ReadAll(unzipped) - if err != nil { - t.Errorf("ReadAll() = %v", err) - } - if got := string(b); got != want { - t.Errorf("ReadAll(); got %q, want %q", got, want) - } -} - -func TestWriter(t *testing.T) { - result := bytes.NewBuffer(nil) - unzipper, err := GunzipWriteCloser(NopWriteCloser(result)) - if err != nil { - t.Errorf("GunzipReadCloser() = %v", err) - } - zipper := GzipWriteCloser(unzipper) - - want := "This is the input string." 
- if _, err := zipper.Write([]byte(want)); err != nil { - t.Errorf("Write(%q) = %v", want, err) - } - if err := zipper.Close(); err != nil { - t.Errorf("Close() = %v", err) - } - - if got := result.String(); got != want { - t.Errorf("String(); got %q, want %q", got, want) - } -} diff --git a/vendor/github.com/inconshreveable/mousetrap/BUILD.bazel b/vendor/github.com/inconshreveable/mousetrap/BUILD.bazel deleted file mode 100644 index bf68f4c4..00000000 --- a/vendor/github.com/inconshreveable/mousetrap/BUILD.bazel +++ /dev/null @@ -1,44 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = select({ - "@io_bazel_rules_go//go/platform:android": [ - "trap_others.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "trap_others.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "trap_others.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "trap_others.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "trap_others.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "trap_others.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "trap_others.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "trap_others.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "trap_others.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "trap_others.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "trap_windows.go", - "trap_windows_1.4.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/inconshreveable/mousetrap", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md deleted file mode 100644 index 7a950d17..00000000 --- a/vendor/github.com/inconshreveable/mousetrap/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# mousetrap - -mousetrap is a tiny library that answers a single question. - -On a Windows machine, was the process invoked by someone double clicking on -the executable file while browsing in explorer? - -### Motivation - -Windows developers unfamiliar with command line tools will often "double-click" -the executable for a tool. Because most CLI tools print the help and then exit -when invoked without arguments, this is often very frustrating for those users. - -mousetrap provides a way to detect these invocations so that you can provide -more helpful behavior and instructions on how to run the CLI tool. 
To see what -this looks like, both from an organizational and a technical perspective, see -https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/ - -### The interface - -The library exposes a single interface: - - func StartedByExplorer() (bool) diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/license b/vendor/github.com/konsorten/go-windows-terminal-sequences/license new file mode 100644 index 00000000..14127cd8 --- /dev/null +++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/license @@ -0,0 +1,9 @@ +(The MIT License) + +Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go new file mode 100644 index 00000000..ef18d8f9 --- /dev/null +++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go @@ -0,0 +1,36 @@ +// +build windows + +package sequences + +import ( + "syscall" + "unsafe" +) + +var ( + kernel32Dll *syscall.LazyDLL = syscall.NewLazyDLL("Kernel32.dll") + setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode") +) + +func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error { + const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4 + + var mode uint32 + err := syscall.GetConsoleMode(syscall.Stdout, &mode) + if err != nil { + return err + } + + if enable { + mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING + } else { + mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING + } + + ret, _, err := setConsoleMode.Call(uintptr(unsafe.Pointer(stream)), uintptr(mode)) + if ret == 0 { + return err + } + + return nil +} diff --git a/vendor/github.com/mitchellh/go-homedir/BUILD.bazel b/vendor/github.com/mitchellh/go-homedir/BUILD.bazel deleted file mode 100644 index 59049e5b..00000000 --- a/vendor/github.com/mitchellh/go-homedir/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = ["homedir.go"], - importpath = "github.com/mitchellh/go-homedir", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = ["homedir_test.go"], - embed = [":go_default_library"], - importpath = "github.com/mitchellh/go-homedir", -) diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md deleted file mode 100644 index d70706d5..00000000 --- 
a/vendor/github.com/mitchellh/go-homedir/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# go-homedir - -This is a Go library for detecting the user's home directory without -the use of cgo, so the library can be used in cross-compilation environments. - -Usage is incredibly simple, just call `homedir.Dir()` to get the home directory -for a user, and `homedir.Expand()` to expand the `~` in a path to the home -directory. - -**Why not just use `os/user`?** The built-in `os/user` package requires -cgo on Darwin systems. This means that any Go code that uses that package -cannot cross compile. But 99% of the time the use for `os/user` is just to -retrieve the home directory, which we can do for the current user without -cgo. This library does that, enabling cross-compilation. diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go index acbb605d..fb87bef9 100644 --- a/vendor/github.com/mitchellh/go-homedir/homedir.go +++ b/vendor/github.com/mitchellh/go-homedir/homedir.go @@ -141,14 +141,16 @@ func dirWindows() (string, error) { return home, nil } + // Prefer standard environment variable USERPROFILE + if home := os.Getenv("USERPROFILE"); home != "" { + return home, nil + } + drive := os.Getenv("HOMEDRIVE") path := os.Getenv("HOMEPATH") home := drive + path if drive == "" || path == "" { - home = os.Getenv("USERPROFILE") - } - if home == "" { - return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank") + return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank") } return home, nil diff --git a/vendor/github.com/mitchellh/go-homedir/homedir_test.go b/vendor/github.com/mitchellh/go-homedir/homedir_test.go deleted file mode 100644 index cd52127d..00000000 --- a/vendor/github.com/mitchellh/go-homedir/homedir_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package homedir - -import ( - "os" - "os/user" - "path/filepath" - "testing" -) - -func patchEnv(key, value string) func() { - bck := os.Getenv(key) - deferFunc := func() { - os.Setenv(key, bck) - } - - if value != "" { - os.Setenv(key, value) - } else { - os.Unsetenv(key) - } - - return deferFunc -} - -func BenchmarkDir(b *testing.B) { - // We do this for any "warmups" - for i := 0; i < 10; i++ { - Dir() - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - Dir() - } -} - -func TestDir(t *testing.T) { - u, err := user.Current() - if err != nil { - t.Fatalf("err: %s", err) - } - - dir, err := Dir() - if err != nil { - t.Fatalf("err: %s", err) - } - - if u.HomeDir != dir { - t.Fatalf("%#v != %#v", u.HomeDir, dir) - } - - DisableCache = true - defer func() { DisableCache = false }() - defer patchEnv("HOME", "")() - dir, err = Dir() - if err != nil { - t.Fatalf("err: %s", err) - } - - if u.HomeDir != dir { - t.Fatalf("%#v != %#v", u.HomeDir, dir) - } -} - -func TestExpand(t *testing.T) { - u, err := user.Current() - if err != nil { - t.Fatalf("err: %s", err) - } - - cases := []struct { - Input string - Output string - Err bool - }{ - { - "/foo", - "/foo", - false, - }, - - { - "~/foo", - filepath.Join(u.HomeDir, "foo"), - false, - }, - - { - "", - "", - false, - }, - - { - "~", - u.HomeDir, - false, - }, - - { - "~foo/foo", - "", - true, - }, - } - - for _, tc := range cases { - actual, err := Expand(tc.Input) - if (err != nil) != tc.Err { - t.Fatalf("Input: %#v\n\nErr: %s", tc.Input, err) - } - - if actual != tc.Output { - t.Fatalf("Input: %#v\n\nOutput: %#v", tc.Input, actual) - } - } - - DisableCache = true - defer func() { DisableCache = false }() - defer patchEnv("HOME", 
"/custom/path/")() - expected := filepath.Join("/", "custom", "path", "foo/bar") - actual, err := Expand("~/foo/bar") - - if err != nil { - t.Errorf("No error is expected, got: %v", err) - } else if actual != expected { - t.Errorf("Expected: %v; actual: %v", expected, actual) - } -} diff --git a/vendor/github.com/nightlyone/lockfile/.gitignore b/vendor/github.com/nightlyone/lockfile/.gitignore deleted file mode 100644 index 5a05665d..00000000 --- a/vendor/github.com/nightlyone/lockfile/.gitignore +++ /dev/null @@ -1,27 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# popular temporaries -.err -.out -.diff - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/nightlyone/lockfile/.gitmodules b/vendor/github.com/nightlyone/lockfile/.gitmodules deleted file mode 100644 index 6faa9e34..00000000 --- a/vendor/github.com/nightlyone/lockfile/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "git-hooks"] - path = git-hooks - url = https://github.com/nightlyone/git-hooks diff --git a/vendor/github.com/nightlyone/lockfile/.travis.yml b/vendor/github.com/nightlyone/lockfile/.travis.yml deleted file mode 100644 index 76e5962b..00000000 --- a/vendor/github.com/nightlyone/lockfile/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go -go: - - 1.4.3 - - 1.6.2 - - tip - -# Only test commits to production branch and all pull requests -branches: - only: - - master - -matrix: - allow_failures: - - go: tip diff --git a/vendor/github.com/nightlyone/lockfile/BUILD.bazel b/vendor/github.com/nightlyone/lockfile/BUILD.bazel deleted file mode 100644 index e35e6b0d..00000000 --- a/vendor/github.com/nightlyone/lockfile/BUILD.bazel +++ /dev/null @@ -1,46 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "lockfile.go", - ] + select({ - "@io_bazel_rules_go//go/platform:darwin": [ - "lockfile_unix.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "lockfile_unix.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "lockfile_unix.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "lockfile_unix.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "lockfile_unix.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "lockfile_unix.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "lockfile_unix.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "lockfile_unix.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "lockfile_windows.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/nightlyone/lockfile", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = ["lockfile_test.go"], - embed = [":go_default_library"], - importpath = "github.com/nightlyone/lockfile", -) diff --git a/vendor/github.com/nightlyone/lockfile/README.md b/vendor/github.com/nightlyone/lockfile/README.md deleted file mode 100644 index c35235cd..00000000 --- a/vendor/github.com/nightlyone/lockfile/README.md +++ /dev/null @@ -1,52 +0,0 @@ -lockfile -========= -Handle locking via pid files. 
- -[![Build Status Unix][1]][2] -[![Build status Windows][3]][4] - -[1]: https://secure.travis-ci.org/nightlyone/lockfile.png -[2]: https://travis-ci.org/nightlyone/lockfile -[3]: https://ci.appveyor.com/api/projects/status/7mojkmauj81uvp8u/branch/master?svg=true -[4]: https://ci.appveyor.com/project/nightlyone/lockfile/branch/master - - - -install -------- -Install [Go 1][5], either [from source][6] or [with a prepackaged binary][7]. -For Windows suport, Go 1.4 or newer is required. - -Then run - - go get github.com/nightlyone/lockfile - -[5]: http://golang.org -[6]: http://golang.org/doc/install/source -[7]: http://golang.org/doc/install - -LICENSE -------- -MIT - -documentation -------------- -[package documentation at godoc.org](http://godoc.org/github.com/nightlyone/lockfile) - -install -------------------- - go get github.com/nightlyone/lockfile - - -contributing -============ - -Contributions are welcome. Please open an issue or send me a pull request for a dedicated branch. -Make sure the git commit hooks show it works. - -git commit hooks ------------------------ -enable commit hooks via - - cd .git ; rm -rf hooks; ln -s ../git-hooks hooks ; cd .. - diff --git a/vendor/github.com/nightlyone/lockfile/appveyor.yml b/vendor/github.com/nightlyone/lockfile/appveyor.yml deleted file mode 100644 index cf72a58b..00000000 --- a/vendor/github.com/nightlyone/lockfile/appveyor.yml +++ /dev/null @@ -1,12 +0,0 @@ -clone_folder: c:\gopath\src\github.com\nightlyone\lockfile - -environment: - GOPATH: c:\gopath - -install: - - go version - - go env - - go get -v -t ./... - -build_script: - - go test -v ./... diff --git a/vendor/github.com/nightlyone/lockfile/lockfile_test.go b/vendor/github.com/nightlyone/lockfile/lockfile_test.go deleted file mode 100644 index be2c821e..00000000 --- a/vendor/github.com/nightlyone/lockfile/lockfile_test.go +++ /dev/null @@ -1,308 +0,0 @@ -package lockfile - -import ( - "fmt" - "io/ioutil" - "math/rand" - "os" - "path/filepath" - "strconv" - "testing" -) - -func ExampleLockfile() { - lock, err := New(filepath.Join(os.TempDir(), "lock.me.now.lck")) - if err != nil { - fmt.Printf("Cannot init lock. reason: %v", err) - panic(err) // handle properly please! - } - err = lock.TryLock() - - // Error handling is essential, as we only try to get the lock. - if err != nil { - fmt.Printf("Cannot lock %q, reason: %v", lock, err) - panic(err) // handle properly please! - } - - defer lock.Unlock() - - fmt.Println("Do stuff under lock") - // Output: Do stuff under lock -} - -func TestBasicLockUnlock(t *testing.T) { - path, err := filepath.Abs("test_lockfile.pid") - if err != nil { - panic(err) - } - - lf, err := New(path) - if err != nil { - t.Fail() - fmt.Println("Error making lockfile: ", err) - return - } - - err = lf.TryLock() - if err != nil { - t.Fail() - fmt.Println("Error locking lockfile: ", err) - return - } - - err = lf.Unlock() - if err != nil { - t.Fail() - fmt.Println("Error unlocking lockfile: ", err) - return - } -} - -func GetDeadPID() int { - // I have no idea how windows handles large PIDs, or if they even exist. - // So limit it to be less or equal to 4096 to be safe. 
- - const maxPid = 4095 - - // limited iteration, so we finish one day - seen := map[int]bool{} - for len(seen) < maxPid { - pid := rand.Intn(maxPid + 1) // see https://godoc.org/math/rand#Intn why - if seen[pid] { - continue - } - seen[pid] = true - running, err := isRunning(pid) - if err != nil { - fmt.Println("Error checking PID: ", err) - continue - } - - if !running { - return pid - } - } - panic(fmt.Sprintf("all pids lower %d are used, cannot test this", maxPid)) -} - -func TestBusy(t *testing.T) { - path, err := filepath.Abs("test_lockfile.pid") - if err != nil { - t.Fatal(err) - return - } - - pid := os.Getppid() - - if err := ioutil.WriteFile(path, []byte(strconv.Itoa(pid)+"\n"), 0666); err != nil { - t.Fatal(err) - return - } - defer os.Remove(path) - - lf, err := New(path) - if err != nil { - t.Fatal(err) - return - } - - got := lf.TryLock() - if got != ErrBusy { - t.Fatalf("expected error %q, got %v", ErrBusy, got) - return - } -} - -func TestRogueDeletion(t *testing.T) { - path, err := filepath.Abs("test_lockfile.pid") - if err != nil { - t.Fatal(err) - return - } - lf, err := New(path) - if err != nil { - t.Fatal(err) - return - } - err = lf.TryLock() - if err != nil { - t.Fatal(err) - return - } - err = os.Remove(path) - if err != nil { - t.Fatal(err) - return - } - - got := lf.Unlock() - if got != ErrRogueDeletion { - t.Fatalf("unexpected error: %v", got) - return - } -} - -func TestRogueDeletionDeadPid(t *testing.T) { - path, err := filepath.Abs("test_lockfile.pid") - if err != nil { - t.Fatal(err) - return - } - lf, err := New(path) - if err != nil { - t.Fatal(err) - return - } - err = lf.TryLock() - if err != nil { - t.Fatal(err) - return - } - - pid := GetDeadPID() - if err := ioutil.WriteFile(path, []byte(strconv.Itoa(pid)+"\n"), 0666); err != nil { - t.Fatal(err) - return - } - defer os.Remove(path) - - err = lf.Unlock() - if err != ErrRogueDeletion { - t.Fatalf("unexpected error: %v", err) - return - } - - if _, err := os.Stat(path); os.IsNotExist(err) { - t.Fatal("lockfile should not be deleted by us, if we didn't create it") - } else { - if err != nil { - t.Fatalf("unexpected error %v", err) - } - } -} - -func TestRemovesStaleLockOnDeadOwner(t *testing.T) { - path, err := filepath.Abs("test_lockfile.pid") - if err != nil { - t.Fatal(err) - return - } - lf, err := New(path) - if err != nil { - t.Fatal(err) - return - } - pid := GetDeadPID() - if err := ioutil.WriteFile(path, []byte(strconv.Itoa(pid)+"\n"), 0666); err != nil { - t.Fatal(err) - return - } - err = lf.TryLock() - if err != nil { - t.Fatal(err) - return - } - - if err := lf.Unlock(); err != nil { - t.Fatal(err) - return - } -} - -func TestInvalidPidLeadToReplacedLockfileAndSuccess(t *testing.T) { - path, err := filepath.Abs("test_lockfile.pid") - if err != nil { - t.Fatal(err) - return - } - if err := ioutil.WriteFile(path, []byte("\n"), 0666); err != nil { - t.Fatal(err) - return - } - defer os.Remove(path) - - lf, err := New(path) - if err != nil { - t.Fatal(err) - return - } - - if err := lf.TryLock(); err != nil { - t.Fatalf("unexpected error: %v", err) - return - } - - // now check if file exists and contains the correct content - got, err := ioutil.ReadFile(path) - if err != nil { - t.Fatalf("unexpected error %v", err) - return - } - want := fmt.Sprintf("%d\n", os.Getpid()) - if string(got) != want { - t.Fatalf("got %q, want %q", got, want) - } -} - -func TestScanPidLine(t *testing.T) { - tests := [...]struct { - input []byte - pid int - xfail error - }{ - { - xfail: ErrInvalidPid, - }, - { - 
input: []byte(""), - xfail: ErrInvalidPid, - }, - { - input: []byte("\n"), - xfail: ErrInvalidPid, - }, - { - input: []byte("-1\n"), - xfail: ErrInvalidPid, - }, - { - input: []byte("0\n"), - xfail: ErrInvalidPid, - }, - { - input: []byte("a\n"), - xfail: ErrInvalidPid, - }, - { - input: []byte("1\n"), - pid: 1, - }, - } - - // test positive cases first - for step, tc := range tests { - if tc.xfail != nil { - continue - } - want := tc.pid - got, err := scanPidLine(tc.input) - if err != nil { - t.Fatalf("%d: unexpected error %v", step, err) - } - if got != want { - t.Errorf("%d: expected pid %d, got %d", step, want, got) - } - } - - // test negative cases now - for step, tc := range tests { - if tc.xfail == nil { - continue - } - want := tc.xfail - _, got := scanPidLine(tc.input) - if got != want { - t.Errorf("%d: expected error %v, got %v", step, want, got) - } - } -} diff --git a/vendor/github.com/nightlyone/lockfile/lockfile_unix.go b/vendor/github.com/nightlyone/lockfile/lockfile_unix.go index 742b041f..d724e701 100644 --- a/vendor/github.com/nightlyone/lockfile/lockfile_unix.go +++ b/vendor/github.com/nightlyone/lockfile/lockfile_unix.go @@ -1,4 +1,4 @@ -// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris +// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris aix package lockfile diff --git a/vendor/github.com/opencontainers/go-digest/.mailmap b/vendor/github.com/opencontainers/go-digest/.mailmap deleted file mode 100644 index ba611cb2..00000000 --- a/vendor/github.com/opencontainers/go-digest/.mailmap +++ /dev/null @@ -1 +0,0 @@ -Stephen J Day diff --git a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml deleted file mode 100644 index 45fa4b9e..00000000 --- a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml +++ /dev/null @@ -1,12 +0,0 @@ -approve_by_comment: true -approve_regex: '^(Approved|lgtm|LGTM|:shipit:|:star:|:\+1:|:ship:)' -reject_regex: ^Rejected -reset_on_push: true -author_approval: ignored -signed_off_by: - required: true -reviewers: - teams: - - go-digest-maintainers - name: default - required: 2 diff --git a/vendor/github.com/opencontainers/go-digest/.travis.yml b/vendor/github.com/opencontainers/go-digest/.travis.yml deleted file mode 100644 index 7ea4ed1d..00000000 --- a/vendor/github.com/opencontainers/go-digest/.travis.yml +++ /dev/null @@ -1,4 +0,0 @@ -language: go -go: - - 1.7 - - master diff --git a/vendor/github.com/opencontainers/go-digest/BUILD.bazel b/vendor/github.com/opencontainers/go-digest/BUILD.bazel deleted file mode 100644 index cdb897c6..00000000 --- a/vendor/github.com/opencontainers/go-digest/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "algorithm.go", - "digest.go", - "digester.go", - "doc.go", - "verifiers.go", - ], - importpath = "github.com/opencontainers/go-digest", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = [ - "algorithm_test.go", - "digest_test.go", - "verifiers_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/opencontainers/go-digest", -) diff --git a/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md b/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md deleted file mode 100644 index e4d962ac..00000000 --- a/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md +++ /dev/null @@ -1,72 +0,0 @@ -# Contributing to 
Docker open source projects - -Want to hack on this project? Awesome! Here are instructions to get you started. - -This project is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -For an in-depth description of our contribution process, visit the -contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/) - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -1 Letterman Drive -Suite D4700 -San Francisco, CA, 94129 - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. 
diff --git a/vendor/github.com/opencontainers/go-digest/MAINTAINERS b/vendor/github.com/opencontainers/go-digest/MAINTAINERS deleted file mode 100644 index 42a29795..00000000 --- a/vendor/github.com/opencontainers/go-digest/MAINTAINERS +++ /dev/null @@ -1,9 +0,0 @@ -Aaron Lehmann (@aaronlehmann) -Brandon Philips (@philips) -Brendan Burns (@brendandburns) -Derek McGowan (@dmcgowan) -Jason Bouzane (@jbouzane) -John Starks (@jstarks) -Jonathan Boulle (@jonboulle) -Stephen Day (@stevvooe) -Vincent Batts (@vbatts) diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md deleted file mode 100644 index 0f5a0409..00000000 --- a/vendor/github.com/opencontainers/go-digest/README.md +++ /dev/null @@ -1,104 +0,0 @@ -# go-digest - -[![GoDoc](https://godoc.org/github.com/opencontainers/go-digest?status.svg)](https://godoc.org/github.com/opencontainers/go-digest) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/go-digest)](https://goreportcard.com/report/github.com/opencontainers/go-digest) [![Build Status](https://travis-ci.org/opencontainers/go-digest.svg?branch=master)](https://travis-ci.org/opencontainers/go-digest) - -Common digest package used across the container ecosystem. - -Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) for more information. - -# What is a digest? - -A digest is just a hash. - -The most common use case for a digest is to create a content -identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage) -systems: - -```go -id := digest.FromBytes([]byte("my content")) -``` - -In the example above, the id can be used to uniquely identify -the byte slice "my content". This allows two disparate applications -to agree on a verifiable identifier without having to trust one -another. - -An identifying digest can be verified, as follows: - -```go -if id != digest.FromBytes([]byte("my content")) { - return errors.New("the content has changed!") -} -``` - -A `Verifier` type can be used to handle cases where an `io.Reader` -makes more sense: - -```go -rd := getContent() -verifier := id.Verifier() -io.Copy(verifier, rd) - -if !verifier.Verified() { - return errors.New("the content has changed!") -} -``` - -Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this -can power a rich, safe, content distribution system. - -# Usage - -While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is -considered the best resource, a few important items need to be called -out when using this package. - -1. Make sure to import the hash implementations into your application - or the package will panic. You should have something like the - following in the main (or other entrypoint) of your application: - - ```go - import ( - _ "crypto/sha256" - _ "crypto/sha512" - ) - ``` - This may seem inconvenient but it allows you replace the hash - implementations with others, such as https://github.com/stevvooe/resumable. - -2. Even though `digest.Digest` may be assemable as a string, _always_ - verify your input with `digest.Parse` or use `Digest.Validate` - when accepting untrusted input. While there are measures to - avoid common problems, this will ensure you have valid digests - in the rest of your application. - -# Stability - -The Go API, at this stage, is considered stable, unless otherwise noted. - -As always, before using a package export, read the [godoc](https://godoc.org/github.com/opencontainers/go-digest). 
- -# Contributing - -This package is considered fairly complete. It has been in production -in thousands (millions?) of deployments and is fairly battle-hardened. -New additions will be met with skepticism. If you think there is a -missing feature, please file a bug clearly describing the problem and -the alternatives you tried before submitting a PR. - -# Reporting security issues - -Please DO NOT file a public issue, instead send your report privately to -security@opencontainers.org. - -The maintainers take security seriously. If you discover a security issue, -please bring it to their attention right away! - -If you are reporting a security issue, do not create an issue or file a pull -request on GitHub. Instead, disclose the issue responsibly by sending an email -to security@opencontainers.org (which is inhabited only by the maintainers of -the various OCI projects). - -# Copyright and license - -Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE.code). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/. diff --git a/vendor/github.com/opencontainers/go-digest/algorithm_test.go b/vendor/github.com/opencontainers/go-digest/algorithm_test.go deleted file mode 100644 index d50e8494..00000000 --- a/vendor/github.com/opencontainers/go-digest/algorithm_test.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2017 Docker, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package digest - -import ( - "bytes" - "crypto/rand" - _ "crypto/sha256" - _ "crypto/sha512" - "flag" - "fmt" - "strings" - "testing" -) - -func TestFlagInterface(t *testing.T) { - var ( - alg Algorithm - flagSet flag.FlagSet - ) - - flagSet.Var(&alg, "algorithm", "set the digest algorithm") - for _, testcase := range []struct { - Name string - Args []string - Err error - Expected Algorithm - }{ - { - Name: "Invalid", - Args: []string{"-algorithm", "bean"}, - Err: ErrDigestUnsupported, - }, - { - Name: "Default", - Args: []string{"unrelated"}, - Expected: "sha256", - }, - { - Name: "Other", - Args: []string{"-algorithm", "sha512"}, - Expected: "sha512", - }, - } { - t.Run(testcase.Name, func(t *testing.T) { - alg = Canonical - if err := flagSet.Parse(testcase.Args); err != testcase.Err { - if testcase.Err == nil { - t.Fatal("unexpected error", err) - } - - // check that flag package returns correct error - if !strings.Contains(err.Error(), testcase.Err.Error()) { - t.Fatalf("unexpected error: %v != %v", err, testcase.Err) - } - return - } - - if alg != testcase.Expected { - t.Fatalf("unexpected algorithm: %v != %v", alg, testcase.Expected) - } - }) - } -} - -func TestFroms(t *testing.T) { - p := make([]byte, 1<<20) - rand.Read(p) - - for alg := range algorithms { - h := alg.Hash() - h.Write(p) - expected := Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil))) - readerDgst, err := alg.FromReader(bytes.NewReader(p)) - if err != nil { - t.Fatalf("error calculating hash from reader: %v", err) - } - - dgsts := []Digest{ - alg.FromBytes(p), - alg.FromString(string(p)), - readerDgst, - } - - if alg == Canonical { - readerDgst, err := FromReader(bytes.NewReader(p)) - if err != nil { - t.Fatalf("error calculating hash from reader: %v", err) - } - - dgsts = append(dgsts, - FromBytes(p), - FromString(string(p)), - readerDgst) - } - for _, dgst := range dgsts { - if dgst != expected { - t.Fatalf("unexpected digest %v != %v", dgst, expected) - } - } - } -} diff --git a/vendor/github.com/opencontainers/go-digest/digest_test.go b/vendor/github.com/opencontainers/go-digest/digest_test.go deleted file mode 100644 index cc3b648a..00000000 --- a/vendor/github.com/opencontainers/go-digest/digest_test.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2017 Docker, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package digest - -import ( - "testing" -) - -func TestParseDigest(t *testing.T) { - for _, testcase := range []struct { - input string - err error - algorithm Algorithm - encoded string - }{ - { - input: "sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - algorithm: "sha256", - encoded: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - }, - { - input: "sha384:d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", - algorithm: "sha384", - encoded: "d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", - }, - { - // empty hex - input: "sha256:", - err: ErrDigestInvalidFormat, - }, - { - // empty hex - input: ":", - err: ErrDigestInvalidFormat, - }, - { - // just hex - input: "d41d8cd98f00b204e9800998ecf8427e", - err: ErrDigestInvalidFormat, - }, - { - // not hex - input: "sha256:d41d8cd98f00b204e9800m98ecf8427e", - err: ErrDigestInvalidLength, - }, - { - // too short - input: "sha256:abcdef0123456789", - err: ErrDigestInvalidLength, - }, - { - // too short (from different algorithm) - input: "sha512:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", - err: ErrDigestInvalidLength, - }, - { - input: "foo:d41d8cd98f00b204e9800998ecf8427e", - err: ErrDigestUnsupported, - }, - { - // repeated separators - input: "sha384__foo+bar:d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", - err: ErrDigestInvalidFormat, - }, - { - // ensure that we parse, but we don't have support for the algorithm - input: "sha384.foo+bar:d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", - algorithm: "sha384.foo+bar", - encoded: "d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", - err: ErrDigestUnsupported, - }, - { - input: "sha384_foo+bar:d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", - algorithm: "sha384_foo+bar", - encoded: "d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", - err: ErrDigestUnsupported, - }, - { - input: "sha256+b64:LCa0a2j_xo_5m0U8HTBBNBNCLXBkg7-g-YpeiGJm564", - algorithm: "sha256+b64", - encoded: "LCa0a2j_xo_5m0U8HTBBNBNCLXBkg7-g-YpeiGJm564", - err: ErrDigestUnsupported, - }, - { - input: "sha256:E58FCF7418D4390DEC8E8FB69D88C06EC07039D651FEDD3AA72AF9972E7D046B", - err: ErrDigestInvalidFormat, - }, - } { - digest, err := Parse(testcase.input) - if err != testcase.err { - t.Fatalf("error differed from expected while parsing %q: %v != %v", testcase.input, err, testcase.err) - } - - if testcase.err != nil { - continue - } - - if digest.Algorithm() != testcase.algorithm { - t.Fatalf("incorrect algorithm for parsed digest: %q != %q", digest.Algorithm(), testcase.algorithm) - } - - if digest.Encoded() != testcase.encoded { - t.Fatalf("incorrect hex for parsed digest: %q != %q", digest.Encoded(), testcase.encoded) - } - - // Parse string return value and check equality - newParsed, err := Parse(digest.String()) - - if err != nil { - t.Fatalf("unexpected error parsing input %q: %v", testcase.input, err) - } - - if newParsed != digest { - t.Fatalf("expected equal: %q != %q", newParsed, digest) - } - - newFromHex := NewDigestFromEncoded(newParsed.Algorithm(), newParsed.Encoded()) - if newFromHex != digest { - t.Fatalf("%v != %v", newFromHex, digest) - } - } -} diff --git 
a/vendor/github.com/opencontainers/go-digest/verifiers_test.go b/vendor/github.com/opencontainers/go-digest/verifiers_test.go deleted file mode 100644 index d67bb1bc..00000000 --- a/vendor/github.com/opencontainers/go-digest/verifiers_test.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2017 Docker, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package digest - -import ( - "bytes" - "crypto/rand" - "io" - "reflect" - "testing" -) - -func TestDigestVerifier(t *testing.T) { - p := make([]byte, 1<<20) - rand.Read(p) - digest := FromBytes(p) - - verifier := digest.Verifier() - - io.Copy(verifier, bytes.NewReader(p)) - - if !verifier.Verified() { - t.Fatalf("bytes not verified") - } -} - -// TestVerifierUnsupportedDigest ensures that unsupported digest validation is -// flowing through verifier creation. -func TestVerifierUnsupportedDigest(t *testing.T) { - for _, testcase := range []struct { - Name string - Digest Digest - Expected interface{} // expected panic target - }{ - { - Name: "Empty", - Digest: "", - Expected: "no ':' separator in digest \"\"", - }, - { - Name: "EmptyAlg", - Digest: ":", - Expected: "empty digest algorithm, validate before calling Algorithm.Hash()", - }, - { - Name: "Unsupported", - Digest: Digest("bean:0123456789abcdef"), - Expected: "bean not available (make sure it is imported)", - }, - { - Name: "Garbage", - Digest: Digest("sha256-garbage:pure"), - Expected: "sha256-garbage not available (make sure it is imported)", - }, - } { - t.Run(testcase.Name, func(t *testing.T) { - expected := testcase.Expected - defer func() { - recovered := recover() - if !reflect.DeepEqual(recovered, expected) { - t.Fatalf("unexpected recover: %v != %v", recovered, expected) - } - }() - - _ = testcase.Digest.Verifier() - }) - } -} diff --git a/vendor/github.com/opencontainers/image-spec/.gitignore b/vendor/github.com/opencontainers/image-spec/.gitignore deleted file mode 100644 index dcc5d552..00000000 --- a/vendor/github.com/opencontainers/image-spec/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -/oci-validate-examples -output -header.html diff --git a/vendor/github.com/opencontainers/image-spec/.header b/vendor/github.com/opencontainers/image-spec/.header deleted file mode 100644 index 868470b6..00000000 --- a/vendor/github.com/opencontainers/image-spec/.header +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- diff --git a/vendor/github.com/opencontainers/image-spec/.pullapprove.yml b/vendor/github.com/opencontainers/image-spec/.pullapprove.yml deleted file mode 100644 index 0f56c1e0..00000000 --- a/vendor/github.com/opencontainers/image-spec/.pullapprove.yml +++ /dev/null @@ -1,27 +0,0 @@ -version: 2 - -requirements: - signed_off_by: - required: true - -group_defaults: - required: 2 - approve_by_comment: - enabled: true - approve_regex: '^(Approved|lgtm|LGTM|:shipit:|:star:|:\+1:|:ship:)' - reject_regex: ^Rejected - reset_on_push: - enabled: true - author_approval: - ignored: true - always_pending: - title_regex: ^WIP - explanation: 'Work in progress...' - conditions: - branches: - - master - -groups: - image-spec: - teams: - - image-spec-maintainers diff --git a/vendor/github.com/opencontainers/image-spec/.travis.yml b/vendor/github.com/opencontainers/image-spec/.travis.yml deleted file mode 100644 index 17f4840c..00000000 --- a/vendor/github.com/opencontainers/image-spec/.travis.yml +++ /dev/null @@ -1,28 +0,0 @@ -language: go -go: - - 1.7 - -sudo: required - -services: - - docker - -before_script: - - export PATH=$HOME/gopath/bin:$PATH - -before_install: - - docker pull vbatts/pandoc - - make install.tools - - go get -u github.com/alecthomas/gometalinter - - gometalinter --install - - go get -t -d ./... - -install: true - -script: - - env | grep TRAVIS_ - - make .gitvalidation - - make lint - - make check-license - - make test - - make docs diff --git a/vendor/github.com/opencontainers/image-spec/GOVERNANCE.md b/vendor/github.com/opencontainers/image-spec/GOVERNANCE.md deleted file mode 100644 index 92c86094..00000000 --- a/vendor/github.com/opencontainers/image-spec/GOVERNANCE.md +++ /dev/null @@ -1,70 +0,0 @@ -# Project governance - -The [OCI charter][charter] §5.b.viii tasks an OCI Project's maintainers (listed in the repository's MAINTAINERS file and sometimes referred to as "the TDC", [§5.e][charter]) with: - -> Creating, maintaining and enforcing governance guidelines for the TDC, approved by the maintainers, and which shall be posted visibly for the TDC. - -This section describes generic rules and procedures for fulfilling that mandate. - -## Proposing a motion - -A maintainer SHOULD propose a motion on the dev@opencontainers.org mailing list (except [security issues](#security-issues)) with another maintainer as a co-sponsor. - -## Voting - -Voting on a proposed motion SHOULD happen on the dev@opencontainers.org mailing list (except [security issues](#security-issues)) with maintainers posting LGTM or REJECT. -Maintainers MAY also explicitly not vote by posting ABSTAIN (which is useful to revert a previous vote). -Maintainers MAY post multiple times (e.g. as they revise their position based on feedback), but only their final post counts in the tally. -A proposed motion is adopted if two-thirds of votes cast, a quorum having voted, are in favor of the release. - -Voting SHOULD remain open for a week to collect feedback from the wider community and allow the maintainers to digest the proposed motion. -Under exceptional conditions (e.g. non-major security fix releases) proposals which reach quorum with unanimous support MAY be adopted earlier. - -A maintainer MAY choose to reply with REJECT. -A maintainer posting a REJECT MUST include a list of concerns or links to written documentation for those concerns (e.g. GitHub issues or mailing-list threads). -The maintainers SHOULD try to resolve the concerns and wait for the rejecting maintainer to change their opinion to LGTM. 
-However, a motion MAY be adopted with REJECTs, as outlined in the previous paragraphs. - -## Quorum - -A quorum is established when at least two-thirds of maintainers have voted. - -For projects that are not specifications, a [motion to release](#release-approval) MAY be adopted if the tally is at least three LGTMs and no REJECTs, even if three votes does not meet the usual two-thirds quorum. - -## Security issues - -Motions with sensitive security implications MUST be proposed on the security@opencontainers.org mailing list instead of dev@opencontainers.org, but should otherwise follow the standard [proposal](#proposing-a-motion) process. -The security@opencontainers.org mailing list includes all members of the TOB. -The TOB will contact the project maintainers and provide a channel for discussing and voting on the motion, but voting will otherwise follow the standard [voting](#voting) and [quorum](#quorum) rules. -The TOB and project maintainers will work together to notify affected parties before making an adopted motion public. - -## Amendments - -The [project governance](#project-governance) rules and procedures MAY be amended or replaced using the procedures themselves. -The MAINTAINERS of this project governance document is the total set of MAINTAINERS from all Open Containers projects (runC, runtime-spec, and image-spec). - -## Subject templates - -Maintainers are busy and get lots of email. -To make project proposals recognizable, proposed motions SHOULD use the following subject templates. - -### Proposing a motion - -> [{project} VOTE]: {motion description} (closes {end of voting window}) - -For example: - -> [runtime-spec VOTE]: Tag 0647920 as 1.0.0-rc (closes 2016-06-03 20:00 UTC) - -### Tallying results - -After voting closes, a maintainer SHOULD post a tally to the motion thread with a subject template like: - -> [{project} {status}]: {motion description} (+{LGTMs} -{REJECTs} #{ABSTAINs}) - -Where `{status}` is either `adopted` or `rejected`. -For example: - -> [runtime-spec adopted]: Tag 0647920 as 1.0.0-rc (+6 -0 #3) - -[charter]: https://www.opencontainers.org/about/governance diff --git a/vendor/github.com/opencontainers/image-spec/HACKING.md b/vendor/github.com/opencontainers/image-spec/HACKING.md deleted file mode 100644 index fd38866d..00000000 --- a/vendor/github.com/opencontainers/image-spec/HACKING.md +++ /dev/null @@ -1,104 +0,0 @@ -# Hacking Guide - -## Overview - -This guide contains instructions for building artifacts contained in this repository. - -### Go - -This spec includes several Go packages, and a command line tool considered to be a reference implementation of the OCI image specification. - -Prerequisites: -* Go - current release only, earlier releases are not supported -* make - -The following make targets are relevant for any work involving the Go packages. - -### Linting - -The included Go source code is being examined for any linting violations not included in the standard Go compiler. Linting is done using [gometalinter](https://github.com/alecthomas/gometalinter). - -Invocation: -``` -$ make lint -``` - -### Tests - -This target executes all Go based tests. - -Invocation: -``` -$ make test -$ make validate-examples -``` - -### Virtual schema http/FileSystem - -The `schema` validator uses a virtual [http/FileSystem](https://golang.org/pkg/net/http/#FileSystem) to load the JSON schema files for validating OCI images and/or manifests. 
-The virtual filesystem is generated using the `esc` tool and compiled into consumers of the `schema` package so the JSON schema files don't have to be distributed along with and consumer binaries. - -Whenever changes are being done in any of the `schema/*.json` files, one must refresh the generated virtual filesystem. -Otherwise schema changes will not be visible inside `schema` consumers. - -Prerequisites: -* [esc](https://github.com/mjibson/esc) - -Invocation: -``` -$ make schema-fs -``` - -### JSON schema formatting - -This target auto-formats all JSON files in the `schema` directory using the `jq` tool. - -Prerequisites: -* [jq](https://stedolan.github.io/jq/) >=1.5 - -Invocation: -``` -$ make fmt -``` - -### OCI image specification PDF/HTML documentation files - -This target generates a PDF/HTML version of the OCI image specification. - -Prerequisites: -* [Docker](https://www.docker.com/) - -Invocation: -``` -$ make docs -``` - -### License header check - -This target checks if the source code includes necessary headers. - -Invocation: -``` -$ make check-license -``` - -### Clean build artifacts - -This target cleans all generated/compiled artifacts. - -Invocation: -``` -$ make clean -``` - -### Create PNG images from dot files - -This target generates PNG image files from DOT source files in the `img` directory. - -Prerequisites: -* [graphviz](http://www.graphviz.org/) - -Invocation: -``` -$ make img/media-types.png -``` diff --git a/vendor/github.com/opencontainers/image-spec/MAINTAINERS b/vendor/github.com/opencontainers/image-spec/MAINTAINERS deleted file mode 100644 index 63de3293..00000000 --- a/vendor/github.com/opencontainers/image-spec/MAINTAINERS +++ /dev/null @@ -1,9 +0,0 @@ -Brandon Philips (@philips) -Brendan Burns (@brendandburns) -Jason Bouzane (@jbouzane) -John Starks (@jstarks) -Jonathan Boulle (@jonboulle) -Stephen Day (@stevvooe) -Vincent Batts (@vbatts) -Aleksa Sarai (@cyphar) -Keyang Xie (@xiekeyang) diff --git a/vendor/github.com/opencontainers/image-spec/Makefile b/vendor/github.com/opencontainers/image-spec/Makefile deleted file mode 100644 index a8d5fd39..00000000 --- a/vendor/github.com/opencontainers/image-spec/Makefile +++ /dev/null @@ -1,146 +0,0 @@ -GO15VENDOREXPERIMENT=1 -export GO15VENDOREXPERIMENT - -DOCKER ?= $(shell command -v docker 2>/dev/null) -PANDOC ?= $(shell command -v pandoc 2>/dev/null) - -ifeq "$(strip $(PANDOC))" '' - ifneq "$(strip $(DOCKER))" '' - PANDOC = $(DOCKER) run \ - -it \ - --rm \ - -v $(shell pwd)/:/input/:ro \ - -v $(shell pwd)/$(OUTPUT_DIRNAME)/:/$(OUTPUT_DIRNAME)/ \ - -u $(shell id -u) \ - --workdir /input \ - docker.io/vbatts/pandoc:1.17.0.3-2.fc25.x86_64 - PANDOC_SRC := /input/ - PANDOC_DST := / - endif -endif - -# These docs are in an order that determines how they show up in the PDF/HTML docs. 
-DOC_FILES := \ - spec.md \ - media-types.md \ - descriptor.md \ - image-layout.md \ - manifest.md \ - image-index.md \ - layer.md \ - config.md \ - annotations.md \ - conversion.md \ - considerations.md \ - implementations.md - -FIGURE_FILES := \ - img/media-types.png - -OUTPUT_DIRNAME ?= output/ -DOC_FILENAME ?= oci-image-spec - -EPOCH_TEST_COMMIT ?= v0.2.0 - -TOOLS := esc gitvalidation glide glide-vc - -default: check-license lint test - -help: - @echo "Usage: make " - @echo - @echo " * 'docs' - produce document in the $(OUTPUT_DIRNAME) directory" - @echo " * 'fmt' - format the json with indentation" - @echo " * 'validate-examples' - validate the examples in the specification markdown files" - @echo " * 'schema-fs' - regenerate the virtual schema http/FileSystem" - @echo " * 'check-license' - check license headers in source files" - @echo " * 'lint' - Execute the source code linter" - @echo " * 'test' - Execute the unit tests" - @echo " * 'img/*.png' - Generate PNG from dot file" - -fmt: - for i in schema/*.json ; do jq --indent 2 -M . "$${i}" > xx && cat xx > "$${i}" && rm xx ; done - -docs: $(OUTPUT_DIRNAME)/$(DOC_FILENAME).pdf $(OUTPUT_DIRNAME)/$(DOC_FILENAME).html - -ifeq "$(strip $(PANDOC))" '' -$(OUTPUT_DIRNAME)/$(DOC_FILENAME).pdf: $(DOC_FILES) $(FIGURE_FILES) - $(error cannot build $@ without either pandoc or docker) -else -$(OUTPUT_DIRNAME)/$(DOC_FILENAME).pdf: $(DOC_FILES) $(FIGURE_FILES) - @mkdir -p $(OUTPUT_DIRNAME)/ && \ - $(PANDOC) -f markdown_github -t latex --latex-engine=xelatex -o $(PANDOC_DST)$@ $(patsubst %,$(PANDOC_SRC)%,$(DOC_FILES)) - ls -sh $(realpath $@) - -$(OUTPUT_DIRNAME)/$(DOC_FILENAME).html: header.html $(DOC_FILES) $(FIGURE_FILES) - @mkdir -p $(OUTPUT_DIRNAME)/ && \ - cp -ap img/ $(shell pwd)/$(OUTPUT_DIRNAME)/&& \ - $(PANDOC) -f markdown_github -t html5 -H $(PANDOC_SRC)header.html --standalone -o $(PANDOC_DST)$@ $(patsubst %,$(PANDOC_SRC)%,$(DOC_FILES)) - ls -sh $(realpath $@) -endif - -header.html: .tool/genheader.go specs-go/version.go - go run .tool/genheader.go > $@ - -validate-examples: schema/fs.go - go test -run TestValidate ./schema - -schema/fs.go: $(wildcard schema/*.json) schema/gen.go - cd schema && printf "%s\n\n%s\n" "$$(cat ../.header)" "$$(go generate)" > fs.go - -schema-fs: schema/fs.go - @echo "generating schema fs" - -check-license: - @echo "checking license headers" - @./.tool/check-license - -lint: - @echo "checking lint" - @./.tool/lint - -test: schema/fs.go - go test -race -cover $(shell go list ./... | grep -v /vendor/) - -img/%.png: img/%.dot - dot -Tpng $^ > $@ - - -# When this is running in travis, it will only check the travis commit range -.gitvalidation: - @which git-validation > /dev/null 2>/dev/null || (echo "ERROR: git-validation not found. 
Consider 'make install.tools' target" && false) -ifdef TRAVIS_COMMIT_RANGE - git-validation -q -run DCO,short-subject,dangling-whitespace -else - git-validation -v -run DCO,short-subject,dangling-whitespace -range $(EPOCH_TEST_COMMIT)..HEAD -endif - -install.tools: $(TOOLS:%=.install.%) - -.install.esc: - go get -u github.com/mjibson/esc - -.install.gitvalidation: - go get -u github.com/vbatts/git-validation - -.install.glide: - go get -u github.com/Masterminds/glide - -.install.glide-vc: - go get -u github.com/sgotti/glide-vc - -clean: - rm -rf *~ $(OUTPUT_DIRNAME) header.html - -.PHONY: \ - $(TOOLS:%=.install.%) \ - validate-examples \ - check-license \ - clean \ - lint \ - install.tools \ - docs \ - test \ - .gitvalidation \ - schema/fs.go \ - schema-fs diff --git a/vendor/github.com/opencontainers/image-spec/README.md b/vendor/github.com/opencontainers/image-spec/README.md deleted file mode 100644 index 31e82786..00000000 --- a/vendor/github.com/opencontainers/image-spec/README.md +++ /dev/null @@ -1,167 +0,0 @@ -# OCI Image Format Specification -
- - - -
- -The OCI Image Format project creates and maintains the software shipping container image format spec (OCI Image Format). - -**[The specification can be found here](spec.md).** - -This repository also provides [Go types](specs-go), [intra-blob validation tooling, and JSON Schema](schema). -The Go types and validation should be compatible with the current Go release; earlier Go releases are not supported. - -Additional documentation about how this group operates: - -- [Code of Conduct](https://github.com/opencontainers/tob/blob/d2f9d68c1332870e40693fe077d311e0742bc73d/code-of-conduct.md) -- [Roadmap](#roadmap) -- [Releases](RELEASES.md) -- [Project Documentation](project.md) - -The _optional_ and _base_ layers of all OCI projects are tracked in the [OCI Scope Table](https://www.opencontainers.org/about/oci-scope-table). - -## Running an OCI Image - -The OCI Image Format partner project is the [OCI Runtime Spec project](https://github.com/opencontainers/runtime-spec). -The Runtime Specification outlines how to run a "[filesystem bundle](https://github.com/opencontainers/runtime-spec/blob/master/bundle.md)" that is unpacked on disk. -At a high-level an OCI implementation would download an OCI Image then unpack that image into an OCI Runtime filesystem bundle. -At this point the OCI Runtime Bundle would be run by an OCI Runtime. - -This entire workflow supports the UX that users have come to expect from container engines like Docker and rkt: primarily, the ability to run an image with no additional arguments: - -* docker run example.com/org/app:v1.0.0 -* rkt run example.com/org/app,version=v1.0.0 - -To support this UX the OCI Image Format contains sufficient information to launch the application on the target platform (e.g. command, arguments, environment variables, etc). - -## FAQ - -**Q: Why doesn't this project mention distribution?** - -A: Distribution, for example using HTTP as both Docker v2.2 and AppC do today, is currently out of scope on the [OCI Scope Table](https://www.opencontainers.org/about/oci-scope-table). -There has been [some discussion on the TOB mailing list](https://groups.google.com/a/opencontainers.org/d/msg/tob/A3JnmI-D-6Y/tLuptPDHAgAJ) to make distribution an optional layer, but this topic is a work in progress. - -**Q: What happens to AppC or Docker Image Formats?** - -A: Existing formats can continue to be a proving ground for technologies, as needed. -The OCI Image Format project strives to provide a dependable open specification that can be shared between different tools and be evolved for years or decades of compatibility; as the deb and rpm format have. - -Find more [FAQ on the OCI site](https://www.opencontainers.org/faq). - -## Roadmap - -The [GitHub milestones](https://github.com/opencontainers/image-spec/milestones) lay out the path to the future improvements. - -# Contributing - -Development happens on GitHub for the spec. -Issues are used for bugs and actionable items and longer discussions can happen on the [mailing list](#mailing-list). - -The specification and code is licensed under the Apache 2.0 license found in the `LICENSE` file of this repository. - -## Discuss your design - -The project welcomes submissions, but please let everyone know what you are working on. - -Before undertaking a nontrivial change to this specification, send mail to the [mailing list](#mailing-list) to discuss what you plan to do. -This gives everyone a chance to validate the design, helps prevent duplication of effort, and ensures that the idea fits. 
-It also guarantees that the design is sound before code is written; a GitHub pull-request is not the place for high-level discussions. - -Typos and grammatical errors can go straight to a pull-request. -When in doubt, start on the [mailing-list](#mailing-list). - -## Weekly Call - -The contributors and maintainers of all OCI projects have a weekly meeting Wednesdays at 2:00 PM (USA Pacific). -Everyone is welcome to participate via [UberConference web][UberConference] or audio-only: +1-415-968-0849 (no PIN needed). -An initial agenda will be posted to the [mailing list](#mailing-list) earlier in the week, and everyone is welcome to propose additional topics or suggest other agenda alterations there. -Minutes are posted to the [mailing list](#mailing-list) and minutes from past calls are archived [here][minutes]. - -## Mailing List - -You can subscribe and join the mailing list on [Google Groups](https://groups.google.com/a/opencontainers.org/forum/#!forum/dev). - -## IRC - -OCI discussion happens on #opencontainers on Freenode ([logs][irc-logs]). - -## Markdown style - -To keep consistency throughout the Markdown files in the Open Container spec all files should be formatted one sentence per line. -This fixes two things: it makes diffing easier with git and it resolves fights about line wrapping length. -For example, this paragraph will span three lines in the Markdown source. - -## Git commit - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch. -The rules are pretty simple: if you can certify the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -using your real name (sorry, no pseudonyms or anonymous contributions.) - -You can add the sign off when creating the git commit via `git commit -s`. - -### Commit Style - -Simple house-keeping for clean git history. 
-Read more on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/) or the Discussion section of [`git-commit(1)`](http://git-scm.com/docs/git-commit). - -1. Separate the subject from body with a blank line -2. Limit the subject line to 50 characters -3. Capitalize the subject line -4. Do not end the subject line with a period -5. Use the imperative mood in the subject line -6. Wrap the body at 72 characters -7. Use the body to explain what and why vs. how - * If there was important/useful/essential conversation or information, copy or include a reference -8. When possible, one keyword to scope the change in the subject (i.e. "README: ...", "runtime: ...") - - -[UberConference]: https://www.uberconference.com/opencontainers -[irc-logs]: http://ircbot.wl.linuxfoundation.org/eavesdrop/%23opencontainers/ -[minutes]: http://ircbot.wl.linuxfoundation.org/meetings/opencontainers/ diff --git a/vendor/github.com/opencontainers/image-spec/RELEASES.md b/vendor/github.com/opencontainers/image-spec/RELEASES.md deleted file mode 100644 index e220042c..00000000 --- a/vendor/github.com/opencontainers/image-spec/RELEASES.md +++ /dev/null @@ -1,51 +0,0 @@ -# Releases - -The release process hopes to encourage early, consistent consensus-building during project development. -The mechanisms used are regular community communication on the mailing list about progress, scheduled meetings for issue resolution and release triage, and regularly paced and communicated releases. -Releases are proposed and adopted or rejected using the usual [project governance](GOVERNANCE.md) rules and procedures. - -An anti-pattern that we want to avoid is heavy development or discussions "late cycle" around major releases. -We want to build a community that is involved and communicates consistently through all releases instead of relying on "silent periods" as a judge of stability. - -## Parallel releases - -A single project MAY consider several motions to release in parallel. -However each motion to release after the initial 0.1.0 MUST be based on a previous release that has already landed. - -For example, runtime-spec maintainers may propose a v1.0.0-rc2 on the 1st of the month and a v0.9.1 bugfix on the 2nd of the month. -They may not propose a v1.0.0-rc3 until the v1.0.0-rc2 is accepted (on the 7th if the vote initiated on the 1st passes). - -## Specifications - -The OCI maintains three categories of projects: specifications, applications, and conformance-testing tools. -However, specification releases have special restrictions in the [OCI charter][charter]: - -* They are the target of backwards compatibility (§7.g), and -* They are subject to the OFWa patent grant (§8.d and e). - -To avoid unfortunate side effects (onerous backwards compatibity requirements or Member resignations), the following additional procedures apply to specification releases: - -### Planning a release - -Every OCI specification project SHOULD hold meetings that involve maintainers reviewing pull requests, debating outstanding issues, and planning releases. -This meeting MUST be advertised on the project README and MAY happen on a phone call, video conference, or on IRC. -Maintainers MUST send updates to the dev@opencontainers.org with results of these meetings. - -Before the specification reaches v1.0.0, the meetings SHOULD be weekly. -Once a specification has reached v1.0.0, the maintainers may alter the cadence, but a meeting MUST be held within four weeks of the previous meeting. 
- -The release plans, corresponding milestones and estimated due dates MUST be published on GitHub (e.g. https://github.com/opencontainers/runtime-spec/milestones). -GitHub milestones and issues are only used for community organization and all releases MUST follow the [project governance](GOVERNANCE.md) rules and procedures. - -### Timelines - -Specifications have a variety of different timelines in their lifecycle. - -* Pre-v1.0.0 specifications SHOULD release on a monthly cadence to garner feedback. -* Major specification releases MUST release at least three release candidates spaced a minimum of one week apart. - This means a major release like a v1.0.0 or v2.0.0 release will take 1 month at minimum: one week for rc1, one week for rc2, one week for rc3, and one week for the major release itself. - Maintainers SHOULD strive to make zero breaking changes during this cycle of release candidates and SHOULD restart the three-candidate count when a breaking change is introduced. - For example if a breaking change is introduced in v1.0.0-rc2 then the series would end with v1.0.0-rc4 and v1.0.0. -- Minor and patch releases SHOULD be made on an as-needed basis. - -[charter]: https://www.opencontainers.org/about/governance diff --git a/vendor/github.com/opencontainers/image-spec/annotations.md b/vendor/github.com/opencontainers/image-spec/annotations.md deleted file mode 100644 index f3ba371b..00000000 --- a/vendor/github.com/opencontainers/image-spec/annotations.md +++ /dev/null @@ -1,67 +0,0 @@ -# Annotations -Several components of the specification, like [Image Manifests](manifest.md) and [Descriptors](descriptor.md), feature an optional annotations property, whose format is common and defined in this section. - -This property contains arbitrary metadata. - -## Rules - -* Annotations MUST be a key-value map where both the key and value MUST be strings. -* While the value MUST be present, it MAY be an empty string. -* Keys MUST be unique within this map, and best practice is to namespace the keys. -* Keys SHOULD be named using a reverse domain notation - e.g. `com.example.myKey`. -* The prefix `org.opencontainers` is reserved for keys defined in Open Container Initiative (OCI) specifications and MUST NOT be used by other specifications and extensions. -* Keys using the `org.opencontainers.image` namespace are reserved for use in the OCI Image Specification and MUST NOT be used by other specifications and extensions, including other OCI specifications. -* If there are no annotations then this property MUST either be absent or be an empty map. -* Consumers MUST NOT generate an error if they encounter an unknown annotation key. - -## Pre-Defined Annotation Keys - -This specification defines the following annotation keys, intended for but not limited to [image index](image-index.md) and image [manifest](manifest.md) authors: -* **org.opencontainers.image.created** date and time on which the image was built (string, date-time as defined by [RFC 3339](https://tools.ietf.org/html/rfc3339#section-5.6)). 
-* **org.opencontainers.image.authors** contact details of the people or organization responsible for the image (freeform string) -* **org.opencontainers.image.url** URL to find more information on the image (string) -* **org.opencontainers.image.documentation** URL to get documentation on the image (string) -* **org.opencontainers.image.source** URL to get source code for building the image (string) -* **org.opencontainers.image.version** version of the packaged software - * The version MAY match a label or tag in the source code repository - * version MAY be [Semantic versioning-compatible](http://semver.org/) -* **org.opencontainers.image.revision** Source control revision identifier for the packaged software. -* **org.opencontainers.image.vendor** Name of the distributing entity, organization or individual. -* **org.opencontainers.image.licenses** License(s) under which contained software is distributed as an [SPDX License Expression][spdx-license-expression]. -* **org.opencontainers.image.ref.name** Name of the reference for a target (string). - * SHOULD only be considered valid when on descriptors on `index.json` within [image layout](image-layout.md). - * Character set of the value SHOULD conform to alphanum of `A-Za-z0-9` and separator set of `-._:@/+` - * The reference must match the following [grammar](considerations.md#ebnf): - ``` - ref ::= component ("/" component)* - component ::= alphanum (separator alphanum)* - alphanum ::= [A-Za-z0-9]+ - separator ::= [-._:@+] | "--" - ``` -* **org.opencontainers.image.title** Human-readable title of the image (string) -* **org.opencontainers.image.description** Human-readable description of the software packaged in the image (string) - -## Back-compatibility with Label Schema - -[Label Schema](https://label-schema.org) defined a number of conventional labels for container images, and these are now superceded by annotations with keys starting **org.opencontainers.image**. - -While users are encouraged to use the **org.opencontainers.image** keys, tools MAY choose to support compatible annotations using the **org.label-schema** prefix as follows. - -| `org.opencontainers.image` prefix | `org.label-schema prefix` | Compatibility notes | -|---------------------------|-------------------------|---------------------| -| `created` | `build-date` | Compatible | -| `url` | `url` | Compatible | -| `source` | `vcs-url` | Compatible | -| `version` | `version` | Compatible | -| `revision` | `vcs-ref` | Compatible | -| `vendor` | `vendor` | Compatible | -| `title` | `name` | Compatible | -| `description` | `description` | Compatible | -| `documentation` | `usage` | Value is compatible if the documentation is located by a URL | -| `authors` | | No equivalent in Label Schema | -| `licenses` | | No equivalent in Label Schema | -| `ref.name` | | No equivalent in Label Schema | -| | `schema-version`| No equivalent in the OCI Image Spec | -| | `docker.*`, `rkt.*` | No equivalent in the OCI Image Spec | - -[spdx-license-expression]: https://spdx.org/spdx-specification-21-web-version#h.jxpfx0ykyb60 diff --git a/vendor/github.com/opencontainers/image-spec/config.md b/vendor/github.com/opencontainers/image-spec/config.md deleted file mode 100644 index 398296ae..00000000 --- a/vendor/github.com/opencontainers/image-spec/config.md +++ /dev/null @@ -1,274 +0,0 @@ -# OCI Image Configuration - -An OCI *Image* is an ordered collection of root filesystem changes and the corresponding execution parameters for use within a container runtime. 
-This specification outlines the JSON format describing images for use with a container runtime and execution tool and its relationship to filesystem changesets, described in [Layers](layer.md). - -This section defines the `application/vnd.oci.image.config.v1+json` [media type](media-types.md). - -## Terminology - -This specification uses the following terms: - -### [Layer](layer.md) - -* Image filesystems are composed of *layers*. -* Each layer represents a set of filesystem changes in a tar-based [layer format](layer.md), recording files to be added, changed, or deleted relative to its parent layer. -* Layers do not have configuration metadata such as environment variables or default arguments - these are properties of the image as a whole rather than any particular layer. -* Using a layer-based or union filesystem such as AUFS, or by computing the diff from filesystem snapshots, the filesystem changeset can be used to present a series of image layers as if they were one cohesive filesystem. - -### Image JSON - -* Each image has an associated JSON structure which describes some basic information about the image such as date created, author, as well as execution/runtime configuration like its entrypoint, default arguments, networking, and volumes. -* The JSON structure also references a cryptographic hash of each layer used by the image, and provides history information for those layers. -* This JSON is considered to be immutable, because changing it would change the computed [ImageID](#imageid). -* Changing it means creating a new derived image, instead of changing the existing image. - -### Layer DiffID - -A layer DiffID is the digest over the layer's uncompressed tar archive and serialized in the descriptor digest format, e.g., `sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9`. -Layers SHOULD be packed and unpacked reproducibly to avoid changing the layer DiffID, for example by using [tar-split][] to save the tar headers. - -NOTE: Do not confuse DiffIDs with [layer digests](manifest.md#image-manifest-property-descriptions), often referenced in the manifest, which are digests over compressed or uncompressed content. - -### Layer ChainID - -For convenience, it is sometimes useful to refer to a stack of layers with a single identifier. -While a layer's `DiffID` identifies a single changeset, the `ChainID` identifies the subsequent application of those changesets. -This ensures that we have handles referring to both the layer itself, as well as the result of the application of a series of changesets. -Use in combination with `rootfs.diff_ids` while applying layers to a root filesystem to uniquely and safely identify the result. - -#### Definition - -The `ChainID` of an applied set of layers is defined with the following recursion: - -``` -ChainID(L₀) = DiffID(L₀) -ChainID(L₀|...|Lₙ₋₁|Lₙ) = Digest(ChainID(L₀|...|Lₙ₋₁) + " " + DiffID(Lₙ)) -``` - -For this, we define the binary `|` operation to be the result of applying the right operand to the left operand. -For example, given base layer `A` and a changeset `B`, we refer to the result of applying `B` to `A` as `A|B`. - -Above, we define the `ChainID` for a single layer (`L₀`) as equivalent to the `DiffID` for that layer. -Otherwise, the `ChainID` for a set of applied layers (`L₀|...|Lₙ₋₁|Lₙ`) is defined as the recursion `Digest(ChainID(L₀|...|Lₙ₋₁) + " " + DiffID(Lₙ))`. - -#### Explanation - -Let's say we have layers A, B, C, ordered from bottom to top, where A is the base and C is the top. 
-Defining `|` as a binary application operator, the root filesystem may be `A|B|C`. -While it is implied that `C` is only useful when applied to `A|B`, the identifier `C` is insufficient to identify this result, as we'd have the equality `C = A|B|C`, which isn't true. - -The main issue is when we have two definitions of `C`, `C = C` and `C = A|B|C`. -If this is true (with some handwaving), `C = x|C` where `x = any application`. -This means that if an attacker can define `x`, relying on `C` provides no guarantee that the layers were applied in any order. - -The `ChainID` addresses this problem by being defined as a compound hash. -__We differentiate the changeset `C`, from the order-dependent application `A|B|C` by saying that the resulting rootfs is identified by ChainID(A|B|C), which can be calculated by `ImageConfig.rootfs`.__ - -Let's expand the definition of `ChainID(A|B|C)` to explore its internal structure: - -``` -ChainID(A) = DiffID(A) -ChainID(A|B) = Digest(ChainID(A) + " " + DiffID(B)) -ChainID(A|B|C) = Digest(ChainID(A|B) + " " + DiffID(C)) -``` - -We can replace each definition and reduce to a single equality: - -``` -ChainID(A|B|C) = Digest(Digest(DiffID(A) + " " + DiffID(B)) + " " + DiffID(C)) -``` - -Hopefully, the above is illustrative of the _actual_ contents of the `ChainID`. -Most importantly, we can easily see that `ChainID(C) != ChainID(A|B|C)`, otherwise, `ChainID(C) = DiffID(C)`, which is the base case, could not be true. - -### ImageID - -Each image's ID is given by the SHA256 hash of its [configuration JSON](#image-json). -It is represented as a hexadecimal encoding of 256 bits, e.g., `sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9`. -Since the [configuration JSON](#image-json) that gets hashed references hashes of each layer in the image, this formulation of the ImageID makes images content-addressable. - -## Properties - -Note: Any OPTIONAL field MAY also be set to null, which is equivalent to being absent. - -- **created** *string*, OPTIONAL - - A combined date and time at which the image was created, formatted as defined by [RFC 3339, section 5.6][rfc3339-s5.6]. - -- **author** *string*, OPTIONAL - - Gives the name and/or email address of the person or entity which created and is responsible for maintaining the image. - -- **architecture** *string*, REQUIRED - - The CPU architecture which the binaries in this image are built to run on. - Configurations SHOULD use, and implementations SHOULD understand, values listed in the Go Language document for [`GOARCH`][go-environment]. - -- **os** *string*, REQUIRED - - The name of the operating system which the image is built to run on. - Configurations SHOULD use, and implementations SHOULD understand, values listed in the Go Language document for [`GOOS`][go-environment]. - -- **config** *object*, OPTIONAL - - The execution parameters which SHOULD be used as a base when running a container using the image. - This field can be `null`, in which case any execution parameters should be specified at creation of the container. - - - **User** *string*, OPTIONAL - - The username or UID which is a platform-specific structure that allows specific control over which user the process runs as. - This acts as a default value to use when the value is not specified when creating a container. - For Linux based systems, all of the following are valid: `user`, `uid`, `user:group`, `uid:gid`, `uid:group`, `user:gid`.
- If `group`/`gid` is not specified, the default group and supplementary groups of the given `user`/`uid` in `/etc/passwd` from the container are applied. - - - **ExposedPorts** *object*, OPTIONAL - - A set of ports to expose from a container running this image. - Its keys can be in the format of: -`port/tcp`, `port/udp`, `port` with the default protocol being `tcp` if not specified. - These values act as defaults and are merged with any specified when creating a container. - **NOTE:** This JSON structure value is unusual because it is a direct JSON serialization of the Go type `map[string]struct{}` and is represented in JSON as an object mapping its keys to an empty object. - - - **Env** *array of strings*, OPTIONAL - - Entries are in the format of `VARNAME=VARVALUE`. - These values act as defaults and are merged with any specified when creating a container. - - - **Entrypoint** *array of strings*, OPTIONAL - - A list of arguments to use as the command to execute when the container starts. - These values act as defaults and may be replaced by an entrypoint specified when creating a container. - - - **Cmd** *array of strings*, OPTIONAL - - Default arguments to the entrypoint of the container. - These values act as defaults and may be replaced by any specified when creating a container. - If an `Entrypoint` value is not specified, then the first entry of the `Cmd` array SHOULD be interpreted as the executable to run. - - - **Volumes** *object*, OPTIONAL - - A set of directories describing where the process is likely to write data specific to a container instance. - **NOTE:** This JSON structure value is unusual because it is a direct JSON serialization of the Go type `map[string]struct{}` and is represented in JSON as an object mapping its keys to an empty object. - - - **WorkingDir** *string*, OPTIONAL - - Sets the current working directory of the entrypoint process in the container. - This value acts as a default and may be replaced by a working directory specified when creating a container. - - - **Labels** *object*, OPTIONAL - - The field contains arbitrary metadata for the container. - This property MUST use the [annotation rules](annotations.md#rules). - - - **StopSignal** *string*, OPTIONAL - - The field contains the system call signal that will be sent to the container to exit. The signal can be a signal name in the format `SIGNAME`, for instance `SIGKILL` or `SIGRTMIN+3`. - -- **rootfs** *object*, REQUIRED - - The rootfs key references the layer content addresses used by the image. - This makes the image config hash depend on the filesystem hash. - - - **type** *string*, REQUIRED - - MUST be set to `layers`. - Implementations MUST generate an error if they encounter an unknown value while verifying or unpacking an image. - - - **diff_ids** *array of strings*, REQUIRED - - An array of layer content hashes (`DiffIDs`), in order from first to last. - -- **history** *array of objects*, OPTIONAL - - Describes the history of each layer. - The array is ordered from first to last. - The object has the following fields: - - - **created** *string*, OPTIONAL - - A combined date and time at which the layer was created, formatted as defined by [RFC 3339, section 5.6][rfc3339-s5.6]. - - - **author** *string*, OPTIONAL - - The author of the build point. - - - **created_by** *string*, OPTIONAL - - The command which created the layer. - - - **comment** *string*, OPTIONAL - - A custom message set when creating the layer.
- - - **empty_layer** *boolean*, OPTIONAL - - This field is used to mark if the history item created a filesystem diff. - It is set to true if this history item doesn't correspond to an actual layer in the rootfs section (for example, Dockerfile's [ENV](https://docs.docker.com/engine/reference/builder/#/env) command results in no change to the filesystem). - -Any extra fields in the Image JSON struct are considered implementation specific and MUST be ignored by any implementations which are unable to interpret them. - -Whitespace is OPTIONAL and implementations MAY have compact JSON with no whitespace. - -## Example - -Here is an example image configuration JSON document: - -```json,title=Image%20JSON&mediatype=application/vnd.oci.image.config.v1%2Bjson -{ - "created": "2015-10-31T22:22:56.015925234Z", - "author": "Alyssa P. Hacker ", - "architecture": "amd64", - "os": "linux", - "config": { - "User": "alice", - "ExposedPorts": { - "8080/tcp": {} - }, - "Env": [ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "FOO=oci_is_a", - "BAR=well_written_spec" - ], - "Entrypoint": [ - "/bin/my-app-binary" - ], - "Cmd": [ - "--foreground", - "--config", - "/etc/my-app.d/default.cfg" - ], - "Volumes": { - "/var/job-result-data": {}, - "/var/log/my-app-logs": {} - }, - "WorkingDir": "/home/alice", - "Labels": { - "com.example.project.git.url": "https://example.com/project.git", - "com.example.project.git.commit": "45a939b2999782a3f005621a8d0f29aa387e1d6b" - } - }, - "rootfs": { - "diff_ids": [ - "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - ], - "type": "layers" - }, - "history": [ - { - "created": "2015-10-31T22:22:54.690851953Z", - "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" - }, - { - "created": "2015-10-31T22:22:55.613815829Z", - "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]", - "empty_layer": true - } - ] -} -``` - -[rfc3339-s5.6]: https://tools.ietf.org/html/rfc3339#section-5.6 -[go-environment]: https://golang.org/doc/install/source#environment -[tar-split]: https://github.com/vbatts/tar-split diff --git a/vendor/github.com/opencontainers/image-spec/considerations.md b/vendor/github.com/opencontainers/image-spec/considerations.md deleted file mode 100644 index 7b53c343..00000000 --- a/vendor/github.com/opencontainers/image-spec/considerations.md +++ /dev/null @@ -1,137 +0,0 @@ -# Extensibility - -Implementations that are reading/processing [manifests](manifest.md) or [image indexes](image-index.md) MUST NOT generate an error if they encounter an unknown property. -Instead they MUST ignore unknown properties. - -# Canonicalization - -* OCI Images are [content-addressable](https://en.wikipedia.org/wiki/Content-addressable_storage). See [descriptors](descriptor.md) for more. -* One benefit of content-addressable storage is easy deduplication. -* Many images might depend on a particular [layer](layer.md), but there will only be one blob in the [store](image-layout.md). -* With a different serialization, that same semantic layer would have a different hash, and if both versions of the layer are referenced there will be two blobs with the same semantic content. -* To allow efficient storage, implementations serializing content for blobs SHOULD use a canonical serialization. 
-* This increases the chance that different implementations can push the same semantic content to the store without creating redundant blobs. - -## JSON - -[JSON][] content SHOULD be serialized as [canonical JSON][canonical-json]. -Of the [OCI Image Format Specification media types](media-types.md), all the types ending in `+json` contain JSON content. -Implementations: - -* [Go][]: [github.com/docker/go][], which claims to implement [canonical JSON][canonical-json] except for Unicode normalization. - -[canonical-json]: http://wiki.laptop.org/go/Canonical_JSON -[github.com/docker/go]: https://github.com/docker/go/ -[Go]: https://golang.org/ -[JSON]: http://json.org/ - -# EBNF - -For field formats described in this specification, we use a limited subset of [Extended Backus-Naur Form][ebnf], similar to that used by the [XML specification][xmlebnf]. -Grammars present in the OCI specification are regular and can be converted to a single regular expression. -However, regular expressions are avoided to limit ambiguity between regular expression syntaxes. -By defining a subset of EBNF used here, the possibility of variation, misunderstanding, or ambiguities from linking to a larger specification can be avoided. - -Grammars are made up of rules in the following form: - -``` -symbol ::= expression -``` - -We can say we have the production identified by symbol if the input is matched by the expression. -Whitespace is completely ignored in rule definitions. - -## Expressions - -The simplest expression is the literal, surrounded by quotes: - -``` -literal ::= "matchthis" -``` - -The above expression defines a symbol, "literal", that matches the exact input of "matchthis". -Character classes are delineated by brackets (`[]`), describing either a set, range, or multiple ranges of characters: - -``` -set := [abc] -range := [A-Z] -``` - -The above symbol "set" would match one character of either "a", "b" or "c". -The symbol "range" would match any character, "A" to "Z", inclusive. -Currently, only matching for 7-bit ASCII literals and character classes is defined, as that is all that is required by this specification. -Multiple character ranges and explicit characters can be specified in a single character class, as follows: - -``` -multipleranges := [a-zA-Z=-] -``` - -The above matches the characters in the range `A` to `Z`, `a` to `z` and the individual characters `-` and `=`. - -Expressions can be made up of one or more expressions, such that one must be followed by the other. -This is known as an implicit concatenation operator. -For example, to satisfy the following rule, both `A` and `B` must be matched: - -``` -symbol ::= A B -``` - -Each expression must be matched once and only once, `A` followed by `B`. -To support the description of repetition and optional match criteria, the postfix operators `*` and `+` are defined. -`*` indicates that the preceding expression can be matched zero or more times. -`+` indicates that the preceding expression must be matched one or more times. -These appear in the following form: - -``` -zeroormore ::= expression* -oneormore ::= expression+ -``` - -Parentheses are used to group expressions into a larger expression: - -``` -group ::= (A B) -``` - -Like simpler expressions above, operators can be applied to groups, as well. -To allow for alternates, we also define the infix operator `|`. - -``` -oneof ::= A | B -``` - -The above indicates that the expression should match one of the expressions, `A` or `B`.
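As an illustrative sketch (not part of the vendored specification text), this EBNF subset maps directly onto regular expressions; for example, the `ref` grammar used for `org.opencontainers.image.ref.name` in the annotations section can be compiled with Go's `regexp` package. Variable names below are purely for illustration:

```go
// Sketch: translating the ref grammar from the annotations section
// (ref ::= component ("/" component)*) into a Go regular expression.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// component ::= alphanum (separator alphanum)*
	// alphanum  ::= [A-Za-z0-9]+
	// separator ::= [-._:@+] | "--"
	component := `[A-Za-z0-9]+(?:(?:[-._:@+]|--)[A-Za-z0-9]+)*`
	// ref ::= component ("/" component)*
	ref := regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)

	fmt.Println(ref.MatchString("v1.0.0-vendor.0")) // true
	fmt.Println(ref.MatchString("stable/2.0.0"))    // true
	fmt.Println(ref.MatchString("bad..ref"))        // false: separators need alphanum between them
}
```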
- -## Precedence - -The operator precedence is in the following order: - -- Terminals (literals and character classes) -- Grouping `()` -- Unary operators `+*` -- Concatenation -- Alternates `|` - -The precedence can be better described using grouping to show equivalents. -Concatenation has higher precedence than alternates, such that `A B | C D` is equivalent to `(A B) | (C D)`. -Unary operators have higher precedence than alternates and concatenation, such that `A+ | B+` is equivalent to `(A+) | (B+)`. - -## Examples - -The following combines the previous definitions to match a simple, relative path name, describing the individual components: - -``` -path ::= component ("/" component)* -component ::= [a-z]+ -``` - -The production "component" is one or more lowercase letters. -A "path" is then at least one component, possibly followed by zero or more slash-component pairs. -The above can be converted into the following regular expression: - -``` -[a-z]+(?:/[a-z]+)* -``` - -[ebnf]: https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_form -[xmlebnf]: https://www.w3.org/TR/REC-xml/#sec-notation diff --git a/vendor/github.com/opencontainers/image-spec/conversion.md b/vendor/github.com/opencontainers/image-spec/conversion.md deleted file mode 100644 index 285c2037..00000000 --- a/vendor/github.com/opencontainers/image-spec/conversion.md +++ /dev/null @@ -1,120 +0,0 @@ -# Conversion to OCI Runtime Configuration - -When extracting an OCI Image into an [OCI Runtime bundle][oci-runtime-bundle], two orthogonal components of the extraction are relevant: - -1. Extraction of the root filesystem from the set of [filesystem layers](layer.md). -2. Conversion of the [image configuration blob](config.md) to an [OCI Runtime configuration blob][oci-runtime-config]. - -This section defines how to convert an `application/vnd.oci.image.config.v1+json` blob to an [OCI runtime configuration blob][oci-runtime-config] (the latter component of extraction). -The former component of extraction is defined [elsewhere](layer.md) and is orthogonal to configuration of a runtime bundle. -The values of runtime configuration properties not specified by this document are implementation-defined. - -A converter MUST rely on the OCI image configuration to build the OCI runtime configuration as described by this document; this will create the "default generated runtime configuration". - -The "default generated runtime configuration" MAY be overridden or combined with externally provided inputs from the caller. -In addition, a converter MAY have its own implementation-defined defaults and extensions which MAY be combined with the "default generated runtime configuration". -The restrictions in this document refer only to combining implementation-defined defaults with the "default generated runtime configuration". -Externally provided inputs are considered to be a modification of the `application/vnd.oci.image.config.v1+json` used as a source, and such modifications have no restrictions. - -For example, externally provided inputs MAY cause an environment variable to be added, removed or changed. -However, an implementation-defined default SHOULD NOT result in an environment variable being removed or changed. - -[oci-runtime-bundle]: https://github.com/opencontainers/runtime-spec/blob/v1.0.0/bundle.md -[oci-runtime-config]: https://github.com/opencontainers/runtime-spec/blob/v1.0.0/config.md - -## Verbatim Fields - -Certain image configuration fields have an identical counterpart in the runtime configuration.
-Some of these are purely annotation-based fields, and have been extracted into a [separate subsection](#annotation-fields). -A compliant configuration converter MUST extract the following fields verbatim to the corresponding field in the generated runtime configuration: - -| Image Field | Runtime Field | Notes | -| ------------------- | --------------- | ----- | -| `Config.WorkingDir` | `process.cwd` | | -| `Config.Env` | `process.env` | 1 | -| `Config.Entrypoint` | `process.args` | 2 | -| `Config.Cmd` | `process.args` | 2 | - -1. The converter MAY add additional entries to `process.env` but it SHOULD NOT add entries that have variable names present in `Config.Env`. -2. If both `Config.Entrypoint` and `Config.Cmd` are specified, the converter MUST append the value of `Config.Cmd` to the value of `Config.Entrypoint` and set `process.args` to that combined value. - -### Annotation Fields - -These fields all affect the `annotations` of the runtime configuration, and are thus subject to [precedence](#annotations). - -| Image Field | Runtime Field | Notes | -| ------------------- | --------------- | ----- | -| `author` | `annotations` | 1,2 | -| `created` | `annotations` | 1,3 | -| `Config.Labels` | `annotations` | | -| `Config.StopSignal` | `annotations` | 1,4 | - -1. If a user has explicitly specified this annotation with `Config.Labels`, then the value specified in this field takes lower [precedence](#annotations) and the converter MUST instead use the value from `Config.Labels`. -2. The value of this field MUST be set as the value of `org.opencontainers.image.author` in `annotations`. -3. The value of this field MUST be set as the value of `org.opencontainers.image.created` in `annotations`. -4. The value of this field MUST be set as the value of `org.opencontainers.image.stopSignal` in `annotations`. - -## Parsed Fields - -Certain image configuration fields have a counterpart that must first be translated. -A compliant configuration converter SHOULD parse all of these fields and set the corresponding fields in the generated runtime configuration: - -| Image Field | Runtime Field | -| ------------------- | --------------- | -| `Config.User` | `process.user.*` | - -The method of parsing the above image fields are described in the following sections. - -### `Config.User` - -If the values of [`user` or `group`](config.md#properties) in `Config.User` are numeric (`uid` or `gid`) then the values MUST be copied verbatim to `process.user.uid` and `process.user.gid` respectively. -If the values of [`user` or `group`](config.md#properties) in `Config.User` are not numeric (`user` or `group`) then a converter SHOULD resolve the user information using a method appropriate for the container's context. -For Unix-like systems, this MAY involve resolution through NSS or parsing `/etc/passwd` from the extracted container's root filesystem to determine the values of `process.user.uid` and `process.user.gid`. - -In addition, a converter SHOULD set the value of `process.user.additionalGids` to a value corresponding to the user in the container's context described by `Config.User`. -For Unix-like systems, this MAY involve resolution through NSS or parsing `/etc/group` and determining the group memberships of the user specified in `process.user.uid`. -If the value of [`user`](config.md#properties) in `Config.User` is numeric, the converter SHOULD NOT modify `process.user.additionalGids`. - -If `Config.User` is not defined, the converted `process.user` value is implementation-defined. 
-If `Config.User` does not correspond to a user in the container's context, the converter MUST return an error. - -## Optional Fields - -Certain image configuration fields are not applicable to all conversion use cases, and thus are optional for configuration converters to implement. -A compliant configuration converter SHOULD provide a way for users to extract these fields into the generated runtime configuration: - -| Image Field | Runtime Field | Notes | -| --------------------- | ------------------ | ----- | -| `Config.ExposedPorts` | `annotations` | 1 | -| `Config.Volumes` | `mounts` | 2 | - -1. The runtime configuration does not have a corresponding field for this image field. - However, converters SHOULD set the [`org.opencontainers.image.exposedPorts` annotation](#config.exposedports). -2. Implementations SHOULD provide mounts for these locations such that application data is not written to the container's root filesystem. - If a converter implements conversion for this field using mountpoints, it SHOULD set the `destination` of the mountpoint to the value specified in `Config.Volumes`. - An implementation MAY seed the contents of the mount with data in the image at the same location. - If a _new_ image is created from a container based on the image described by this configuration, data in these paths SHOULD NOT be included in the _new_ image. - The other `mounts` fields are platform and context dependent, and thus are implementation-defined. - Note that the implementation of `Config.Volumes` need not use mountpoints, as it is effectively a mask of the filesystem. - -### `Config.ExposedPorts` - -The OCI runtime configuration does not provide a way of expressing the concept of "container exposed ports". -However, converters SHOULD set the **org.opencontainers.image.exposedPorts** annotation, unless doing so will [cause a conflict](#annotations). - -**org.opencontainers.image.exposedPorts** is the list of values that correspond to the [keys defined for `Config.ExposedPorts`](config.md) (string, comma-separated values). - -## Annotations - -There are three ways of annotating an OCI image in this specification: - -1. `Config.Labels` in the [configuration](config.md) of the image. -2. `annotations` in the [manifest](manifest.md) of the image. -3. `annotations` in the [image index](image-index.md) of the image. - -In addition, there are also implicit annotations that are defined by this section which are determined from the values of the image configuration. -A converter SHOULD NOT attempt to extract annotations from [manifests](manifest.md) or [image indices](image-index.md). -If there is a conflict (same key but different value) between an implicit annotation (or annotation in [manifests](manifest.md) or [image indices](image-index.md)) and an explicitly specified annotation in `Config.Labels`, the value specified in `Config.Labels` MUST take precedence. - -A converter MAY add annotations which have keys not specified in the image. -A converter MUST NOT modify the values of annotations specified in the image. diff --git a/vendor/github.com/opencontainers/image-spec/descriptor.md b/vendor/github.com/opencontainers/image-spec/descriptor.md deleted file mode 100644 index 570985c5..00000000 --- a/vendor/github.com/opencontainers/image-spec/descriptor.md +++ /dev/null @@ -1,185 +0,0 @@ -# OCI Content Descriptors - -* An OCI image consists of several different components, arranged in a [Merkle Directed Acyclic Graph (DAG)](https://en.wikipedia.org/wiki/Merkle_tree). 
-* References between components in the graph are expressed through _Content Descriptors_. -* A Content Descriptor (or simply _Descriptor_) describes the disposition of the targeted content. -* A Content Descriptor includes the type of the content, a content identifier (_digest_), and the byte-size of the raw content. -* Descriptors SHOULD be embedded in other formats to securely reference external content. -* Other formats SHOULD use descriptors to securely reference external content. - -This section defines the `application/vnd.oci.descriptor.v1+json` [media type](media-types.md). - -## Properties - -A descriptor consists of a set of properties encapsulated in key-value fields. - -The following fields contain the primary properties that constitute a Descriptor: - -- **`mediaType`** *string* - - This REQUIRED property contains the media type of the referenced content. - Values MUST comply with [RFC 6838][rfc6838], including the [naming requirements in its section 4.2][rfc6838-s4.2]. - - The OCI image specification defines [several of its own MIME types](media-types.md) for resources defined in the specification. - -- **`digest`** *string* - - This REQUIRED property is the _digest_ of the targeted content, conforming to the requirements outlined in [Digests](#digests). - Retrieved content SHOULD be verified against this digest when consumed via untrusted sources. - -- **`size`** *int64* - - This REQUIRED property specifies the size, in bytes, of the raw content. - This property exists so that a client will have an expected size for the content before processing. - If the length of the retrieved content does not match the specified length, the content SHOULD NOT be trusted. - -- **`urls`** *array of strings* - - This OPTIONAL property specifies a list of URIs from which this object MAY be downloaded. - Each entry MUST conform to [RFC 3986][rfc3986]. - Entries SHOULD use the `http` and `https` schemes, as defined in [RFC 7230][rfc7230-s2.7]. - -- **`annotations`** *string-string map* - - This OPTIONAL property contains arbitrary metadata for this descriptor. - This OPTIONAL property MUST use the [annotation rules](annotations.md#rules). - -Descriptors pointing to [`application/vnd.oci.image.manifest.v1+json`](manifest.md) SHOULD include the extended field `platform`, see [Image Index Property Descriptions](image-index.md#image-index-property-descriptions) for details. - -### Reserved - -The following field keys are reserved and MUST NOT be used by other specifications. - -- **`data`** *string* - - This key is RESERVED for future versions of the specification. - -All other fields may be included in other OCI specifications. -Extended _Descriptor_ field additions proposed in other OCI specifications SHOULD first be considered for addition into this specification. - -## Digests - -The _digest_ property of a Descriptor acts as a content identifier, enabling [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage). -It uniquely identifies content by taking a [collision-resistant hash](https://en.wikipedia.org/wiki/Cryptographic_hash_function) of the bytes. -If the _digest_ can be communicated in a secure manner, one can verify content from an insecure source by recalculating the digest independently, ensuring the content has not been modified. - -The value of the `digest` property is a string consisting of an _algorithm_ portion and an _encoded_ portion. 
-The _algorithm_ specifies the cryptographic hash function and encoding used for the digest; the _encoded_ portion contains the encoded result of the hash function. - -A digest string MUST match the following [grammar](considerations.md#ebnf): - -``` -digest ::= algorithm ":" encoded -algorithm ::= algorithm-component (algorithm-separator algorithm-component)* -algorithm-component ::= [a-z0-9]+ -algorithm-separator ::= [+._-] -encoded ::= [a-zA-Z0-9=_-]+ -``` - -Note that _algorithm_ MAY impose algorithm-specific restriction on the grammar of the _encoded_ portion. -See also [Registered Algorithms](#registered-algorithms). - -Some example digest strings include the following: - -digest | algorithm | Registered | ---------------------------------------------------------------------------|---------------------|------------| -`sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b` | [SHA-256](#sha-256) | Yes | -`sha512:401b09eab3c013d4ca54922bb802bec8fd5318192b0a75f201d8b372742...` | [SHA-512](#sha-512) | Yes | -`multihash+base58:QmRZxt2b1FVZPNqd8hsiykDL3TdBDeTSPX9Kv46HmX4Gx8` | Multihash | No | -`sha256+b64u:LCa0a2j_xo_5m0U8HTBBNBNCLXBkg7-g-YpeiGJm564` | SHA-256 with urlsafe base64 | No | - -Please see [Registered Algorithms](#registered-algorithms) for a list of registered algorithms. - -Implementations SHOULD allow digests with unrecognized algorithms to pass validation if they comply with the above grammar. -While `sha256` will only use hex encoded digests, separators in _algorithm_ and alphanumerics in _encoded_ are included to allow for extensions. -As an example, we can parameterize the encoding and algorithm as `multihash+base58:QmRZxt2b1FVZPNqd8hsiykDL3TdBDeTSPX9Kv46HmX4Gx8`, which would be considered valid but unregistered by this specification. - -### Verification - -Before consuming content targeted by a descriptor from untrusted sources, the byte content SHOULD be verified against the digest string. -Before calculating the digest, the size of the content SHOULD be verified to reduce hash collision space. -Heavy processing before calculating a hash SHOULD be avoided. -Implementations MAY employ [canonicalization](canonicalization.md#canonicalization) of the underlying content to ensure stable content identifiers. - -### Digest calculations - -A _digest_ is calculated by the following pseudo-code, where `H` is the selected hash algorithm, identified by string ``: -``` -let ID(C) = Descriptor.digest -let C = -let D = ':' + Encode(H(C)) -let verified = ID(C) == D -``` -Above, we define the content identifier as `ID(C)`, extracted from the `Descriptor.digest` field. -Content `C` is a string of bytes. -Function `H` returns the hash of `C` in bytes and is passed to function `Encode` and prefixed with the algorithm to obtain the digest. -The result `verified` is true if `ID(C)` is equal to `D`, confirming that `C` is the content identified by `D`. -After verification, the following is true: - -``` -D == ID(C) == ':' + Encode(H(C)) -``` - -The _digest_ is confirmed as the content identifier by independently calculating the _digest_. - -### Registered algorithms - -While the _algorithm_ component of the digest string allows the use of a variety of cryptographic algorithms, compliant implementations SHOULD use [SHA-256](#sha-256). 
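A minimal sketch of the digest calculation pseudo-code above, assuming only the Go standard library and the registered `sha256` algorithm with hex encoding; the function and variable names are illustrative, not a defined API:

```go
// Sketch: verify that content C matches descriptor digest D, following
// D == "sha256:" + Encode(H(C)) with H = SHA-256 and hex encoding.
package main

import (
	"crypto/sha256"
	"fmt"
)

func verified(digest string, content []byte) bool {
	sum := sha256.Sum256(content)
	return digest == fmt.Sprintf("sha256:%x", sum)
}

func main() {
	c := []byte("hello world")
	d := fmt.Sprintf("sha256:%x", sha256.Sum256(c))
	fmt.Println(verified(d, c))                  // true
	fmt.Println(verified(d, []byte("tampered"))) // false
}
```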
- -The following algorithm identifiers are currently defined by this specification: - -| algorithm identifier | algorithm | -|----------------------|---------------------| -| `sha256` | [SHA-256](#sha-256) | -| `sha512` | [SHA-512](#sha-512) | - -If a useful algorithm is not included in the above table, it SHOULD be submitted to this specification for registration. - -#### SHA-256 - -[SHA-256][rfc4634-s4.1] is a collision-resistant hash function, chosen for ubiquity, reasonable size and secure characteristics. -Implementations MUST implement SHA-256 digest verification for use in descriptors. - -When the _algorithm identifier_ is `sha256`, the _encoded_ portion MUST match `/[a-f0-9]{64}/`. -Note that `[A-F]` MUST NOT be used here. - -#### SHA-512 - -[SHA-512][rfc4634-s4.2] is a collision-resistant hash function which [may be more perfomant][sha256-vs-sha512] than [SHA-256](#sha-256) on some CPUs. -Implementations MAY implement SHA-512 digest verification for use in descriptors. - -When the _algorithm identifier_ is `sha512`, the _encoded_ portion MUST match `/[a-f0-9]{128}/`. -Note that `[A-F]` MUST NOT be used here. - -## Examples - -The following example describes a [_Manifest_](manifest.md#image-manifest) with a content identifier of "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270" and a size of 7682 bytes: - -```json,title=Content%20Descriptor&mediatype=application/vnd.oci.descriptor.v1%2Bjson -{ - "mediaType": "application/vnd.oci.image.manifest.v1+json", - "size": 7682, - "digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270" -} -``` - -In the following example, the descriptor indicates that the referenced manifest is retrievable from a particular URL: - -```json,title=Content%20Descriptor&mediatype=application/vnd.oci.descriptor.v1%2Bjson -{ - "mediaType": "application/vnd.oci.image.manifest.v1+json", - "size": 7682, - "digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270", - "urls": [ - "https://example.com/example-manifest" - ] -} -``` - -[rfc3986]: https://tools.ietf.org/html/rfc3986 -[rfc4634-s4.1]: https://tools.ietf.org/html/rfc4634#section-4.1 -[rfc4634-s4.2]: https://tools.ietf.org/html/rfc4634#section-4.2 -[rfc6838]: https://tools.ietf.org/html/rfc6838 -[rfc6838-s4.2]: https://tools.ietf.org/html/rfc6838#section-4.2 -[rfc7230-s2.7]: https://tools.ietf.org/html/rfc7230#section-2.7 -[sha256-vs-sha512]: https://groups.google.com/a/opencontainers.org/forum/#!topic/dev/hsMw7cAwrZE diff --git a/vendor/github.com/opencontainers/image-spec/image-index.md b/vendor/github.com/opencontainers/image-spec/image-index.md deleted file mode 100644 index c0a2ca4d..00000000 --- a/vendor/github.com/opencontainers/image-spec/image-index.md +++ /dev/null @@ -1,129 +0,0 @@ -# OCI Image Index Specification - -The image index is a higher-level manifest which points to specific [image manifests](manifest.md), ideal for one or more platforms. -While the use of an image index is OPTIONAL for image providers, image consumers SHOULD be prepared to process them. - -This section defines the `application/vnd.oci.image.index.v1+json` [media type](media-types.md). -For the media type(s) that this document is compatible with, see the [matrix][matrix]. - -## *Image Index* Property Descriptions - -- **`schemaVersion`** *int* - - This REQUIRED property specifies the image manifest schema version. - For this version of the specification, this MUST be `2` to ensure backward compatibility with older versions of Docker. 
- The value of this field will not change. - This field MAY be removed in a future version of the specification. - -- **`mediaType`** *string* - - This property is *reserved* for use, to [maintain compatibility][matrix]. - When used, this field contains the media type of this document, which differs from the [descriptor](descriptor.md#properties) use of `mediaType`. - -- **`manifests`** *array of objects* - - This REQUIRED property contains a list of [manifests](manifest.md) for specific platforms. - While this property MUST be present, the size of the array MAY be zero. - - Each object in `manifests` includes a set of [descriptor properties](descriptor.md#properties) with the following additional properties and restrictions: - - - **`mediaType`** *string* - - This [descriptor property](descriptor.md#properties) has additional restrictions for `manifests`. - Implementations MUST support at least the following media types: - - - [`application/vnd.oci.image.manifest.v1+json`](manifest.md) - - Image indexes concerned with portability SHOULD use one of the above media types. - Future versions of the spec MAY use a different mediatype (i.e. a new versioned format). - An encountered `mediaType` that is unknown SHOULD be safely ignored. - - - **`platform`** *object* - - This OPTIONAL property describes the minimum runtime requirements of the image. - This property SHOULD be present if its target is platform-specific. - - - **`architecture`** *string* - - This REQUIRED property specifies the CPU architecture. - Image indexes SHOULD use, and implementations SHOULD understand, values listed in the Go Language document for [`GOARCH`][go-environment2]. - - - **`os`** *string* - - This REQUIRED property specifies the operating system. - Image indexes SHOULD use, and implementations SHOULD understand, values listed in the Go Language document for [`GOOS`][go-environment2]. - - - **`os.version`** *string* - - This OPTIONAL property specifies the version of the operating system targeted by the referenced blob. - Implementations MAY refuse to use manifests where `os.version` is not known to work with the host OS version. - Valid values are implementation-defined. e.g. `10.0.14393.1066` on `windows`. - - - **`os.features`** *array of strings* - - This OPTIONAL property specifies an array of strings, each specifying a mandatory OS feature. - When `os` is `windows`, image indexes SHOULD use, and implementations SHOULD understand the following values: - - - `win32k`: image requires `win32k.sys` on the host (Note: `win32k.sys` is missing on Nano Server) - - When `os` is not `windows`, values are implementation-defined and SHOULD be submitted to this specification for standardization. - - - **`variant`** *string* - - This OPTIONAL property specifies the variant of the CPU. - Image indexes SHOULD use, and implementations SHOULD understand, values listed in the following table. - When the variant of the CPU is not listed in the table, values are implementation-defined and SHOULD be submitted to this specification for standardization. - - | ISA/ABI | `architecture` | `variant` | - |-----------------|----------------|-------------| - | ARM 32-bit, v6 | `arm` | `v6` | - | ARM 32-bit, v7 | `arm` | `v7` | - | ARM 32-bit, v8 | `arm` | `v8` | - | ARM 64-bit, v8 | `arm64` | `v8` | - - - **`features`** *array of strings* - - This property is RESERVED for future versions of the specification. - -- **`annotations`** *string-string map* - - This OPTIONAL property contains arbitrary metadata for the image index. 
- This OPTIONAL property MUST use the [annotation rules](annotations.md#rules). - - See [Pre-Defined Annotation Keys](annotations.md#pre-defined-annotation-keys). - -## Example Image Index - -*Example showing a simple image index pointing to image manifests for two platforms:* -```json,title=Image%20Index&mediatype=application/vnd.oci.image.index.v1%2Bjson -{ - "schemaVersion": 2, - "manifests": [ - { - "mediaType": "application/vnd.oci.image.manifest.v1+json", - "size": 7143, - "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", - "platform": { - "architecture": "ppc64le", - "os": "linux" - } - }, - { - "mediaType": "application/vnd.oci.image.manifest.v1+json", - "size": 7682, - "digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270", - "platform": { - "architecture": "amd64", - "os": "linux" - } - } - ], - "annotations": { - "com.example.key1": "value1", - "com.example.key2": "value2" - } -} -``` - -[go-environment2]: https://golang.org/doc/install/source#environment -[matrix]: media-types.md#compatibility-matrix diff --git a/vendor/github.com/opencontainers/image-spec/image-layout.md b/vendor/github.com/opencontainers/image-spec/image-layout.md deleted file mode 100644 index 2915c894..00000000 --- a/vendor/github.com/opencontainers/image-spec/image-layout.md +++ /dev/null @@ -1,205 +0,0 @@ -## OCI Image Layout Specification - -* The OCI Image Layout is directory structure for OCI content-addressable blobs and [location-addressable](https://en.wikipedia.org/wiki/Content-addressable_storage#Content-addressed_vs._location-addressed) references (refs). -* This layout MAY be used in a variety of different transport mechanisms: archive formats (e.g. tar, zip), shared filesystem environments (e.g. nfs), or networked file fetching (e.g. http, ftp, rsync). - -Given an image layout and a ref, a tool can create an [OCI Runtime Specification bundle](https://github.com/opencontainers/runtime-spec/blob/v1.0.0/bundle.md) by: - -* Following the ref to find a [manifest](manifest.md#image-manifest), possibly via an [image index](image-index.md) -* [Applying the filesystem layers](layer.md#applying) in the specified order -* Converting the [image configuration](config.md) into an [OCI Runtime Specification `config.json`](https://github.com/opencontainers/runtime-spec/blob/v1.0.0/config.md) - -# Content - -The image layout is as follows: - -- `blobs` directory - - Contains content-addressable blobs - - A blob has no schema and SHOULD be considered opaque - - Directory MUST exist and MAY be empty - - See [blobs](#blobs) section -- `oci-layout` file - - It MUST exist - - It MUST be a JSON object - - It MUST contain an `imageLayoutVersion` field - - See [oci-layout file](#oci-layout-file) section - - It MAY include additional fields -- `index.json` file - - It MUST exist - - It MUST be an [image index](image-index.md) JSON object. - - See [index.json](#indexjson-file) section - -## Example Layout - -This is an example image layout: - -``` -$ cd example.com/app/ -$ find . 
-type f -./index.json -./oci-layout -./blobs/sha256/3588d02542238316759cbf24502f4344ffcc8a60c803870022f335d1390c13b4 -./blobs/sha256/4b0bc1c4050b03c95ef2a8e36e25feac42fd31283e8c30b3ee5df6b043155d3c -./blobs/sha256/7968321274dc6b6171697c33df7815310468e694ac5be0ec03ff053bb135e768 -``` - -Blobs are named by their contents: - -``` -$ shasum -a 256 ./blobs/sha256/afff3924849e458c5ef237db5f89539274d5e609db5db935ed3959c90f1f2d51 -afff3924849e458c5ef237db5f89539274d5e609db5db935ed3959c90f1f2d51 ./blobs/sha256/afff3924849e458c5ef237db5f89539274d5e609db5db935ed3959c90f1f2d51 -``` - -## Blobs - -* Object names in the `blobs` subdirectories are composed of a directory for each hash algorithm, the children of which will contain the actual content. -* The content of `blobs//` MUST match the digest `:` (referenced per [descriptor](descriptor.md#digests)). For example, the content of `blobs/sha256/da39a3ee5e6b4b0d3255bfef95601890afd80709` MUST match the digest `sha256:da39a3ee5e6b4b0d3255bfef95601890afd80709`. -* The character set of the entry name for `` and `` MUST match the respective grammar elements described in [descriptor](descriptor.md#digests). -* The blobs directory MAY contain blobs which are not referenced by any of the [refs](#indexjson-file). -* The blobs directory MAY be missing referenced blobs, in which case the missing blobs SHOULD be fulfilled by an external blob store. - -### Example Blobs - -``` -$ cat ./blobs/sha256/9b97579de92b1c195b85bb42a11011378ee549b02d7fe9c17bf2a6b35d5cb079 | jq -{ - "schemaVersion": 2, - "manifests": [ - { - "mediaType": "application/vnd.oci.image.manifest.v1+json", - "size": 7143, - "digest": "sha256:afff3924849e458c5ef237db5f89539274d5e609db5db935ed3959c90f1f2d51", - "platform": { - "architecture": "ppc64le", - "os": "linux" - } - }, -... -``` - -``` -$ cat ./blobs/sha256/afff3924849e458c5ef237db5f89539274d5e609db5db935ed3959c90f1f2d51 | jq -{ - "schemaVersion": 2, - "config": { - "mediaType": "application/vnd.oci.image.config.v1+json", - "size": 7023, - "digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270" - }, - "layers": [ - { - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "size": 32654, - "digest": "sha256:9834876dcfb05cb167a5c24953eba58c4ac89b1adf57f28f2f9d09af107ee8f0" - }, -... -``` - -``` -$ cat ./blobs/sha256/5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270 | jq -{ - "architecture": "amd64", - "author": "Alyssa P. Hacker ", - "config": { - "Hostname": "8dfe43d80430", - "Domainname": "", - "User": "", - "AttachStdin": false, - "AttachStdout": false, - "AttachStderr": false, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": [ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - ], - "Cmd": null, - "Image": "sha256:6986ae504bbf843512d680cc959484452034965db15f75ee8bdd1b107f61500b", -... -``` - -``` -$ cat ./blobs/sha256/9834876dcfb05cb167a5c24953eba58c4ac89b1adf57f28f2f9d09af107ee8f0 -[gzipped tar stream] -``` - -## oci-layout file - -This JSON object serves as a marker for the base of an Open Container Image Layout and to provide the version of the image-layout in use. -The `imageLayoutVersion` value will align with the OCI Image Specification version at the time changes to the layout are made, and will pin a given version until changes to the image layout are required. -This section defines the `application/vnd.oci.layout.header.v1+json` [media type](media-types.md). 
- -### oci-layout Example - -```json,title=OCI%20Layout&mediatype=application/vnd.oci.layout.header.v1%2Bjson -{ - "imageLayoutVersion": "1.0.0" -} -``` - -## index.json file - -This REQUIRED file is the entry point for references and descriptors of the image-layout. -The [image index](image-index.md) is a multi-descriptor entry point. - -This index provides an established path (`/index.json`) to have an entry point for an image-layout and to discover auxiliary descriptors. - -* No semantic restriction is given for the "org.opencontainers.image.ref.name" annotation of descriptors. -* In general the `mediaType` of each [descriptor][descriptors] object in the `manifests` field will be either `application/vnd.oci.image.index.v1+json` or `application/vnd.oci.image.manifest.v1+json`. -* Future versions of the spec MAY use a different mediatype (i.e. a new versioned format). -* An encountered `mediaType` that is unknown SHOULD be safely ignored. - - -**Implementor's Note:** -A common use case of descriptors with a "org.opencontainers.image.ref.name" annotation is representing a "tag" for a container image. -For example, an image may have a tag for different versions or builds of the software. -In the wild you often see "tags" like "v1.0.0-vendor.0", "2.0.0-debug", etc. -Those tags will often be represented in an image-layout repository with matching "org.opencontainers.image.ref.name" annotations like "v1.0.0-vendor.0", "2.0.0-debug", etc. - - -### Index Example - -```json,title=Image%20Index&mediatype=application/vnd.oci.image.index.v1%2Bjson -{ - "schemaVersion": 2, - "manifests": [ - { - "mediaType": "application/vnd.oci.image.index.v1+json", - "size": 7143, - "digest": "sha256:0228f90e926ba6b96e4f39cf294b2586d38fbb5a1e385c05cd1ee40ea54fe7fd", - "annotations": { - "org.opencontainers.image.ref.name": "stable-release" - } - }, - { - "mediaType": "application/vnd.oci.image.manifest.v1+json", - "size": 7143, - "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", - "platform": { - "architecture": "ppc64le", - "os": "linux" - }, - "annotations": { - "org.opencontainers.image.ref.name": "v1.0" - } - }, - { - "mediaType": "application/xml", - "size": 7143, - "digest": "sha256:b3d63d132d21c3ff4c35a061adf23cf43da8ae054247e32faa95494d904a007e", - "annotations": { - "org.freedesktop.specifications.metainfo.version": "1.0", - "org.freedesktop.specifications.metainfo.type": "AppStream" - } - } - ], - "annotations": { - "com.example.index.revision": "r124356" - } -} -``` - -This illustrates an index that provides two named manifest references and an auxiliary mediatype for this image layout. 
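As a companion to the index example above, the following editorial sketch (assuming the vendored `specs-go/v1` package and its `AnnotationRefName` constant) shows one way a tool might resolve a ref such as `v1.0` to a descriptor by scanning `index.json`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"path/filepath"

	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// findRef returns the first manifest descriptor in index.json whose
// "org.opencontainers.image.ref.name" annotation matches ref.
func findRef(root, ref string) (*v1.Descriptor, error) {
	raw, err := ioutil.ReadFile(filepath.Join(root, "index.json"))
	if err != nil {
		return nil, err
	}
	var idx v1.Index
	if err := json.Unmarshal(raw, &idx); err != nil {
		return nil, err
	}
	for _, desc := range idx.Manifests {
		if desc.Annotations[v1.AnnotationRefName] == ref {
			d := desc
			return &d, nil
		}
	}
	return nil, fmt.Errorf("ref %q not found in index.json", ref)
}

func main() {
	desc, err := findRef("example.com/app", "v1.0")
	if err != nil {
		fmt.Println(err)
		return
	}
	// The descriptor names the blob to fetch next from the blobs directory.
	fmt.Println(desc.MediaType, desc.Digest)
}
```

Following the returned digest into the `blobs` directory then yields either a nested index or an image manifest, depending on the descriptor's `mediaType`.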
- - -[descriptors]: ./descriptor.md diff --git a/vendor/github.com/opencontainers/image-spec/implementations.md b/vendor/github.com/opencontainers/image-spec/implementations.md deleted file mode 100644 index 31d8279e..00000000 --- a/vendor/github.com/opencontainers/image-spec/implementations.md +++ /dev/null @@ -1,21 +0,0 @@ -# OCI Image Implementations - -Projects or Companies currently adopting the OCI Image Specification - -* [projectatomic/skopeo](https://github.com/projectatomic/skopeo) -* [Amazon Elastic Container Registry (ECR)](https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-manifest-formats.html) ([announcement](https://aws.amazon.com/about-aws/whats-new/2017/01/amazon-ecr-supports-docker-image-manifest-v2-schema-2/)) -* [openSUSE/umoci](https://github.com/openSUSE/umoci) -* [cloudfoundry/grootfs](https://github.com/cloudfoundry/grootfs) ([source](https://github.com/cloudfoundry/grootfs/blob/c3da26e1e463b51be1add289032f3dca6698b335/fetcher/remote/docker_src.go)) -* [Mesos plans](https://issues.apache.org/jira/browse/MESOS-5011) ([design doc](https://docs.google.com/document/d/1Pus7D-inIBoLSIPyu3rl_apxvUhtp3rp0_b0Ttr2Xww/edit#heading=h.hrvk2wboog4p)) -* [Docker](https://github.com/docker) - - [docker/docker (`docker save/load` WIP)](https://github.com/docker/docker/pull/26369) - - [docker/distribution (registry PR)](https://github.com/docker/distribution/pull/2076) -* [containerd/containerd](https://github.com/containerd/containerd) -* [Containers](https://github.com/containers/) - - [containers/build](https://github.com/containers/build) - - [containers/image](https://github.com/containers/image) -* [coreos/rkt](https://github.com/coreos/rkt) -* [box-builder/box](https://github.com/box-builder/box) -* [coolljt0725/docker2oci](https://github.com/coolljt0725/docker2oci) - -_(to add your project please open a [pull-request](https://github.com/opencontainers/image-spec/pulls))_ diff --git a/vendor/github.com/opencontainers/image-spec/layer.md b/vendor/github.com/opencontainers/image-spec/layer.md deleted file mode 100644 index 947037ce..00000000 --- a/vendor/github.com/opencontainers/image-spec/layer.md +++ /dev/null @@ -1,333 +0,0 @@ -# Image Layer Filesystem Changeset - -This document describes how to serialize a filesystem and filesystem changes like removed files into a blob called a layer. -One or more layers are applied on top of each other to create a complete filesystem. -This document will use a concrete example to illustrate how to create and consume these filesystem layers. - -This section defines the `application/vnd.oci.image.layer.v1.tar`, `application/vnd.oci.image.layer.v1.tar+gzip`, `application/vnd.oci.image.layer.nondistributable.v1.tar`, and `application/vnd.oci.image.layer.nondistributable.v1.tar+gzip` [media types](media-types.md). - -## `+gzip` Media Types - -* The media type `application/vnd.oci.image.layer.v1.tar+gzip` represents an `application/vnd.oci.image.layer.v1.tar` payload which has been compressed with [gzip][rfc1952_2]. -* The media type `application/vnd.oci.image.layer.nondistributable.v1.tar+gzip` represents an `application/vnd.oci.image.layer.nondistributable.v1.tar` payload which has been compressed with [gzip][rfc1952_2]. - -## Distributable Format - -* Layer Changesets for the [media type](media-types.md) `application/vnd.oci.image.layer.v1.tar` MUST be packaged in [tar archive][tar-archive]. 
-* Layer Changesets for the [media type](media-types.md) `application/vnd.oci.image.layer.v1.tar` MUST NOT include duplicate entries for file paths in the resulting [tar archive][tar-archive]. - -## Change Types - -Types of changes that can occur in a changeset are: - -* Additions -* Modifications -* Removals - -Additions and Modifications are represented the same in the changeset tar archive. - -Removals are represented using "[whiteout](#whiteouts)" file entries (See [Representing Changes](#representing-changes)). - -### File Types - -Throughout this document section, the use of word "files" or "entries" includes the following, where supported: - -* regular files -* directories -* sockets -* symbolic links -* block devices -* character devices -* FIFOs - -### File Attributes - -Where supported, MUST include file attributes for Additions and Modifications include: - -* Modification Time (`mtime`) -* User ID (`uid`) - * User Name (`uname`) *secondary to `uid`* -* Group ID (`gid `) - * Group Name (`gname`) *secondary to `gid`* -* Mode (`mode`) -* Extended Attributes (`xattrs`) -* Symlink reference (`linkname` + symbolic link type) -* [Hardlink](#hardlinks) reference (`linkname`) - -[Sparse files](https://en.wikipedia.org/wiki/Sparse_file) SHOULD NOT be used because they lack consistent support across tar implementations. - -#### Hardlinks - -* Hardlinks are a [POSIX concept](http://pubs.opengroup.org/onlinepubs/9699919799/functions/link.html) for having one or more directory entries for the same file on the same device. -* Not all filesystems support hardlinks (e.g. [FAT](https://en.wikipedia.org/wiki/File_Allocation_Table)). -* Hardlinks are possible with all [file types](#file-types) except `directories`. -* Non-directory files are considered "hardlinked" when their link count is greater than 1. -* Hardlinked files are on a same device (i.e. comparing Major:Minor pair) and have the same inode. -* The corresponding files that share the link with the > 1 linkcount may be outside the directory that the changeset is being produced from, in which case the `linkname` is not recorded in the changeset. -* Hardlinks are stored in a tar archive with type of a `1` char, per the [GNU Basic Tar Format][gnu-tar-standard] and [libarchive tar(5)][libarchive-tar]. -* While approaches to deriving new or changed hardlinks may vary, a possible approach is: - -``` -SET LinkMap to map[< Major:Minor String >]map[< inode integer >]< path string > -SET LinkNames to map[< src path string >]< dest path string > -FOR each path in root path - IF path type is directory - CONTINUE - ENDIF - SET filestat to stat(path) - IF filestat num of links == 1 - CONTINUE - ENDIF - IF LinkMap[filestat device][filestat inode] is not empty - SET LinkNames[path] to LinkMap[filestat device][filestat inode] - ELSE - SET LinkMap[filestat device][filestat inode] to path - ENDIF -END FOR -``` - -With this approach, the link map and links names of a directory could be compared against that of another directory to derive additions and changes to hardlinks. 
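The pseudocode above translates fairly directly to Go. The sketch below is editorial and Linux-only (it reaches into `syscall.Stat_t`); it builds the same two maps, and comparing the maps produced for the base directory and the snapshot would then surface new or changed hardlinks:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"syscall"
)

type devInode struct {
	dev, ino uint64
}

// collectHardlinks walks root and, for every non-directory entry whose link
// count is greater than one, records either the first path seen for its
// (device, inode) pair or a mapping from the path to that first path.
func collectHardlinks(root string) (map[string]string, error) {
	firstSeen := map[devInode]string{} // LinkMap in the pseudocode
	linkNames := map[string]string{}   // LinkNames in the pseudocode

	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil || info.IsDir() {
			return err
		}
		st, ok := info.Sys().(*syscall.Stat_t)
		if !ok || st.Nlink <= 1 {
			return nil
		}
		key := devInode{dev: uint64(st.Dev), ino: uint64(st.Ino)}
		if prev, ok := firstSeen[key]; ok {
			linkNames[path] = prev
		} else {
			firstSeen[key] = path
		}
		return nil
	})
	return linkNames, err
}

func main() {
	links, err := collectHardlinks("rootfs-c9d-v1.s1")
	if err != nil {
		fmt.Println(err)
		return
	}
	for src, dst := range links {
		fmt.Printf("%s is a hardlink to %s\n", src, dst)
	}
}
```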
- -#### Platform-specific attributes - -Implementations on Windows MUST support these additional attributes, encoded in [PAX vendor -extensions](https://github.com/libarchive/libarchive/wiki/ManPageTar5#pax-interchange-format) as follows: - -* [Windows file attributes](https://msdn.microsoft.com/en-us/library/windows/desktop/gg258117(v=vs.85).aspx) (`MSWINDOWS.fileattr`) -* [Security descriptor](https://msdn.microsoft.com/en-us/library/cc230366.aspx) (`MSWINDOWS.rawsd`): base64-encoded self-relative binary security descriptor -* Mount points (`MSWINDOWS.mountpoint`): if present on a directory symbolic link, then the link should be created as a [directory junction](https://en.wikipedia.org/wiki/NTFS_junction_point) -* Creation time (`LIBARCHIVE.creationtime`) - -## Creating - -### Initial Root Filesystem - -The initial root filesystem is the base or parent layer. - -For this example, an image root filesystem has an initial state as an empty directory. -The name of the directory is not relevant to the layer itself, only for the purpose of producing comparisons. - -Here is an initial empty directory structure for a changeset, with a unique directory name `rootfs-c9d-v1`. - -``` -rootfs-c9d-v1/ -``` - -### Populate Initial Filesystem - -Files and directories are then created: - -``` -rootfs-c9d-v1/ - etc/ - my-app-config - bin/ - my-app-binary - my-app-tools -``` - -The `rootfs-c9d-v1` directory is then created as a plain [tar archive][tar-archive] with relative path to `rootfs-c9d-v1`. -Entries for the following files: - -``` -./ -./etc/ -./etc/my-app-config -./bin/ -./bin/my-app-binary -./bin/my-app-tools -``` - -### Populate a Comparison Filesystem - -Create a new directory and initialize it with a copy or snapshot of the prior root filesystem. -Example commands that can preserve [file attributes](#file-attributes) to make this copy are: -* [cp(1)](http://linux.die.net/man/1/cp): `cp -a rootfs-c9d-v1/ rootfs-c9d-v1.s1/` -* [rsync(1)](http://linux.die.net/man/1/rsync): `rsync -aHAX rootfs-c9d-v1/ rootfs-c9d-v1.s1/` -* [tar(1)](http://linux.die.net/man/1/tar): `mkdir rootfs-c9d-v1.s1 && tar --acls --xattrs -C rootfs-c9d-v1/ -c . | tar -C rootfs-c9d-v1.s1/ --acls --xattrs -x` (including `--selinux` where supported) - -Any [changes](#change-types) to the snapshot MUST NOT change or affect the directory it was copied from. - -For example `rootfs-c9d-v1.s1` is an identical snapshot of `rootfs-c9d-v1`. -In this way `rootfs-c9d-v1.s1` is prepared for updates and alterations. - -**Implementor's Note**: *a copy-on-write or union filesystem can efficiently make directory snapshots* - -Initial layout of the snapshot: - -``` -rootfs-c9d-v1.s1/ - etc/ - my-app-config - bin/ - my-app-binary - my-app-tools -``` - -See [Change Types](#change-types) for more details on changes. - -For example, add a directory at `/etc/my-app.d` containing a default config file, removing the existing config file. -Also a change (in attribute or file content) to `./bin/my-app-tools` binary to handle the config layout change. - -Following these changes, the representation of the `rootfs-c9d-v1.s1` directory: - -``` -rootfs-c9d-v1.s1/ - etc/ - my-app.d/ - default.cfg - bin/ - my-app-binary - my-app-tools -``` - -### Determining Changes - -When two directories are compared, the relative root is the top-level directory. -The directories are compared, looking for files that have been [added, modified, or removed](#change-types). 
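One simplified way to perform that comparison is sketched below in Go, for illustration only: it classifies "modified" purely from size, mode and mtime, whereas a production implementation would also compare ownership, xattrs and content.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// compare walks both directory trees and reports paths (relative to each
// root) that were added, modified, or deleted in newRoot with respect to
// oldRoot. Only size, mode and mtime are inspected here.
func compare(oldRoot, newRoot string) (added, modified, deleted []string, err error) {
	oldInfo := map[string]os.FileInfo{}
	err = filepath.Walk(oldRoot, func(path string, info os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if rel, _ := filepath.Rel(oldRoot, path); rel != "." {
			oldInfo[rel] = info
		}
		return nil
	})
	if err != nil {
		return nil, nil, nil, err
	}

	err = filepath.Walk(newRoot, func(path string, info os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		rel, _ := filepath.Rel(newRoot, path)
		if rel == "." {
			return nil
		}
		prev, existed := oldInfo[rel]
		delete(oldInfo, rel)
		switch {
		case !existed:
			added = append(added, rel)
		case prev.Size() != info.Size() || prev.Mode() != info.Mode() || !prev.ModTime().Equal(info.ModTime()):
			modified = append(modified, rel)
		}
		return nil
	})
	if err != nil {
		return nil, nil, nil, err
	}

	// Whatever was not visited in the second walk no longer exists.
	for rel := range oldInfo {
		deleted = append(deleted, rel)
	}
	return added, modified, deleted, nil
}

func main() {
	a, m, d, err := compare("rootfs-c9d-v1", "rootfs-c9d-v1.s1")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("Added:", a)
	fmt.Println("Modified:", m)
	fmt.Println("Deleted:", d)
}
```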
- -For this example, `rootfs-c9d-v1/` and `rootfs-c9d-v1.s1/` are recursively compared, each as relative root path. - -The following changeset is found: - -``` -Added: /etc/my-app.d/ -Added: /etc/my-app.d/default.cfg -Modified: /bin/my-app-tools -Deleted: /etc/my-app-config -``` - -This reflects the removal of `/etc/my-app-config` and creation of a file and directory at `/etc/my-app.d/default.cfg`. -`/bin/my-app-tools` has also been replaced with an updated version. - -### Representing Changes - -A [tar archive][tar-archive] is then created which contains *only* this changeset: - -- Added and modified files and directories in their entirety -- Deleted files or directories marked with a [whiteout file](#whiteouts) - -The resulting tar archive for `rootfs-c9d-v1.s1` has the following entries: - -``` -./etc/my-app.d/ -./etc/my-app.d/default.cfg -./bin/my-app-tools -./etc/.wh.my-app-config -``` - -To signify that the resource `./etc/my-app-config` MUST be removed when the changeset is applied, the basename of the entry is prefixed with `.wh.`. - -## Applying Changesets - -* Layer Changesets of [media type](media-types.md) `application/vnd.oci.image.layer.v1.tar` are _applied_, rather than simply extracted as tar archives. -* Applying a layer changeset requires special consideration for the [whiteout](#whiteouts) files. -* In the absence of any [whiteout](#whiteouts) files in a layer changeset, the archive is extracted like a regular tar archive. - -### Changeset over existing files - -This section specifies applying an entry from a layer changeset if the target path already exists. - -If the entry and the existing path are both directories, then the existing path's attributes MUST be replaced by those of the entry in the changeset. -In all other cases, the implementation MUST do the semantic equivalent of the following: -- removing the file path (e.g. [`unlink(2)`](http://linux.die.net/man/2/unlink) on Linux systems) -- recreating the file path, based on the contents and attributes of the changeset entry - -## Whiteouts - -* A whiteout file is an empty file with a special filename that signifies a path should be deleted. -* A whiteout filename consists of the prefix `.wh.` plus the basename of the path to be deleted. -* As files prefixed with `.wh.` are special whiteout markers, it is not possible to create a filesystem which has a file or directory with a name beginning with `.wh.`. -* Once a whiteout is applied, the whiteout itself MUST also be hidden. -* Whiteout files MUST only apply to resources in lower/parent layers. -* Files that are present in the same layer as a whiteout file can only be hidden by whiteout files in subsequent layers. - -The following is a base layer with several resources: - -``` -a/ -a/b/ -a/b/c/ -a/b/c/bar -``` - -When the next layer is created, the original `a/b` directory is deleted and recreated with `a/b/c/foo`: - -``` -a/ -a/.wh..wh..opq -a/b/ -a/b/c/ -a/b/c/foo -``` - -When processing the second layer, `a/.wh..wh..opq` is applied first, before creating the new version of `a/b`, regardless of the ordering in which the whiteout file was encountered. -For example, the following layer is equivalent to the layer above: - -``` -a/ -a/b/ -a/b/c/ -a/b/c/foo -a/.wh..wh..opq -``` - -Implementations SHOULD generate layers such that the whiteout files appear before sibling directory entries. - -### Opaque Whiteout - -* In addition to expressing that a single entry should be removed from a lower layer, layers may remove all of the children using an opaque whiteout entry. 
-* An opaque whiteout entry is a file with the name `.wh..wh..opq` indicating that all siblings are hidden in the lower layer. - -Let's take the following base layer as an example: - -``` -etc/ - my-app-config -bin/ - my-app-binary - my-app-tools - tools/ - my-app-tool-one -``` - -If all children of `bin/` are removed, the next layer would have the following: - -``` -bin/ - .wh..wh..opq -``` - -This is called _opaque whiteout_ format. -An _opaque whiteout_ file hides _all_ children of the `bin/` including sub-directories and all descendants. -Using _explicit whiteout_ files, this would be equivalent to the following: - -``` -bin/ - .wh.my-app-binary - .wh.my-app-tools - .wh.tools -``` - -In this case, a unique whiteout file is generated for each entry. -If there were more children of `bin/` in the base layer, there would be an entry for each. -Note that this opaque file will apply to _all_ children, including sub-directories, other resources and all descendants. - -Implementations SHOULD generate layers using _explicit whiteout_ files, but MUST accept both. - -Any given image is likely to be composed of several of these Image Filesystem Changeset tar archives. - -# Non-Distributable Layers - -Due to legal requirements, certain layers may not be regularly distributable. -Such "non-distributable" layers are typically downloaded directly from a distributor but never uploaded. - -Non-distributable layers SHOULD be tagged with an alternative mediatype of `application/vnd.oci.image.layer.nondistributable.v1.tar`. -Implementations SHOULD NOT upload layers tagged with this media type; however, such a media type SHOULD NOT affect whether an implementation downloads the layer. - -[Descriptors](descriptor.md) referencing non-distributable layers MAY include `urls` for downloading these layers directly; however, the presence of the `urls` field SHOULD NOT be used to determine whether or not a layer is non-distributable. - -[libarchive-tar]: https://github.com/libarchive/libarchive/wiki/ManPageTar5#POSIX_ustar_Archives -[gnu-tar-standard]: http://www.gnu.org/software/tar/manual/html_node/Standard.html -[rfc1952_2]: https://tools.ietf.org/html/rfc1952 -[tar-archive]: https://en.wikipedia.org/wiki/Tar_(computing) diff --git a/vendor/github.com/opencontainers/image-spec/manifest.md b/vendor/github.com/opencontainers/image-spec/manifest.md deleted file mode 100644 index b05ccc7d..00000000 --- a/vendor/github.com/opencontainers/image-spec/manifest.md +++ /dev/null @@ -1,106 +0,0 @@ -# OCI Image Manifest Specification - -There are three main goals of the Image Manifest Specification. -The first goal is content-addressable images, by supporting an image model where the image's configuration can be hashed to generate a unique ID for the image and its components. -The second goal is to allow multi-architecture images, through a "fat manifest" which references image manifests for platform-specific versions of an image. -In OCI, this is codified in an [image index](image-index.md). -The third goal is to be [translatable](conversion.md) to the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec). - -This section defines the `application/vnd.oci.image.manifest.v1+json` [media type](media-types.md). -For the media type(s) that this is compatible with see the [matrix](media-types.md#compatibility-matrix). 
- -# Image Manifest - -Unlike the [image index](image-index.md), which contains information about a set of images that can span a variety of architectures and operating systems, an image manifest provides a configuration and set of layers for a single container image for a specific architecture and operating system. - -## *Image Manifest* Property Descriptions - -- **`schemaVersion`** *int* - - This REQUIRED property specifies the image manifest schema version. - For this version of the specification, this MUST be `2` to ensure backward compatibility with older versions of Docker. The value of this field will not change. This field MAY be removed in a future version of the specification. - -- **`mediaType`** *string* - - This property is *reserved* for use, to [maintain compatibility](media-types.md#compatibility-matrix). - When used, this field contains the media type of this document, which differs from the [descriptor](descriptor.md#properties) use of `mediaType`. - -- **`config`** *[descriptor](descriptor.md)* - - This REQUIRED property references a configuration object for a container, by digest. - Beyond the [descriptor requirements](descriptor.md#properties), the value has the following additional restrictions: - - - **`mediaType`** *string* - - This [descriptor property](descriptor.md#properties) has additional restrictions for `config`. - Implementations MUST support at least the following media types: - - - [`application/vnd.oci.image.config.v1+json`](config.md) - - Manifests concerned with portability SHOULD use one of the above media types. - -- **`layers`** *array of objects* - - Each item in the array MUST be a [descriptor](descriptor.md). - The array MUST have the base layer at index 0. - Subsequent layers MUST then follow in stack order (i.e. from `layers[0]` to `layers[len(layers)-1]`). - The final filesystem layout MUST match the result of [applying](layer.md#applying-changesets) the layers to an empty directory. - The [ownership, mode, and other attributes](layer.md#file-attributes) of the initial empty directory are unspecified. - - Beyond the [descriptor requirements](descriptor.md#properties), the value has the following additional restrictions: - - - **`mediaType`** *string* - - This [descriptor property](descriptor.md#properties) has additional restrictions for `layers[]`. - Implementations MUST support at least the following media types: - - - [`application/vnd.oci.image.layer.v1.tar`](layer.md) - - [`application/vnd.oci.image.layer.v1.tar+gzip`](layer.md#gzip-media-types) - - [`application/vnd.oci.image.layer.nondistributable.v1.tar`](layer.md#non-distributable-layers) - - [`application/vnd.oci.image.layer.nondistributable.v1.tar+gzip`](layer.md#gzip-media-types) - - Manifests concerned with portability SHOULD use one of the above media types. - - Entries in this field will frequently use the `+gzip` types. - -- **`annotations`** *string-string map* - - This OPTIONAL property contains arbitrary metadata for the image manifest. - This OPTIONAL property MUST use the [annotation rules](annotations.md#rules). - - See [Pre-Defined Annotation Keys](annotations.md#pre-defined-annotation-keys). 
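Before the JSON example below, a small editorial sketch shows how these properties map onto the vendored Go types (`specs-go/v1.Manifest`); the program name and the manifest-blob path argument are hypothetical:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"

	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	if len(os.Args) < 2 {
		fmt.Println("usage: readmanifest <path-to-manifest-blob>")
		return
	}
	f, err := os.Open(os.Args[1])
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	var m v1.Manifest
	if err := json.NewDecoder(f).Decode(&m); err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println("config:", m.Config.MediaType, m.Config.Digest)
	// layers[0] is the base layer; later entries are applied on top, in order.
	for i, layer := range m.Layers {
		fmt.Printf("layer %d: %s %s (%d bytes)\n", i, layer.MediaType, layer.Digest, layer.Size)
	}
}
```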
- -## Example Image Manifest - -*Example showing an image manifest:* -```json,title=Manifest&mediatype=application/vnd.oci.image.manifest.v1%2Bjson -{ - "schemaVersion": 2, - "config": { - "mediaType": "application/vnd.oci.image.config.v1+json", - "size": 7023, - "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7" - }, - "layers": [ - { - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "size": 32654, - "digest": "sha256:9834876dcfb05cb167a5c24953eba58c4ac89b1adf57f28f2f9d09af107ee8f0" - }, - { - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "size": 16724, - "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b" - }, - { - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "size": 73109, - "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736" - } - ], - "annotations": { - "com.example.key1": "value1", - "com.example.key2": "value2" - } -} -``` diff --git a/vendor/github.com/opencontainers/image-spec/media-types.md b/vendor/github.com/opencontainers/image-spec/media-types.md deleted file mode 100644 index 1f01fd6b..00000000 --- a/vendor/github.com/opencontainers/image-spec/media-types.md +++ /dev/null @@ -1,68 +0,0 @@ -# OCI Image Media Types - -The following media types identify the formats described here and their referenced resources: - -- `application/vnd.oci.descriptor.v1+json`: [Content Descriptor](descriptor.md) -- `application/vnd.oci.layout.header.v1+json`: [OCI Layout](image-layout.md#oci-layout-file) -- `application/vnd.oci.image.index.v1+json`: [Image Index](image-index.md) -- `application/vnd.oci.image.manifest.v1+json`: [Image manifest](manifest.md#image-manifest) -- `application/vnd.oci.image.config.v1+json`: [Image config](config.md) -- `application/vnd.oci.image.layer.v1.tar`: ["Layer", as a tar archive](layer.md) -- `application/vnd.oci.image.layer.v1.tar+gzip`: ["Layer", as a tar archive](layer.md#gzip-media-types) compressed with [gzip][rfc1952] -- `application/vnd.oci.image.layer.nondistributable.v1.tar`: ["Layer", as a tar archive with distribution restrictions](layer.md#non-distributable-layers) -- `application/vnd.oci.image.layer.nondistributable.v1.tar+gzip`: ["Layer", as a tar archive with distribution restrictions](layer.md#gzip-media-types) compressed with [gzip][rfc1952] - -## Media Type Conflicts - -[Blob](image-layout.md) retrieval methods MAY return media type metadata. -For example, a HTTP response might return a manifest with the Content-Type header set to `application/vnd.oci.image.manifest.v1+json`. -Implementations MAY also have expectations for the blob's media type and digest (e.g. from a [descriptor](descriptor.md) referencing the blob). - -* Implementations that do not have an expected media type for the blob SHOULD respect the returned media type. -* Implementations that have an expected media type which matches the returned media type SHOULD respect the matched media type. -* Implementations that have an expected media type which does not match the returned media type SHOULD: - * Respect the expected media type if the blob matches the expected digest. - Implementations MAY warn about the media type mismatch. - * Return an error if the blob does not match the expected digest (as [recommended for descriptors](descriptor.md#properties)). - * Return an error if they do not have an expected digest. - -## Compatibility Matrix - -The OCI Image Specification strives to be backwards and forwards compatible when possible. 
-Breaking compatibility with existing systems creates a burden on users whether they are build systems, distribution systems, container engines, etc. -This section shows where the OCI Image Specification is compatible with formats external to the OCI Image and different versions of this specification. - -### application/vnd.oci.image.index.v1+json - -**Similar/related schema** - -- [application/vnd.docker.distribution.manifest.list.v2+json](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md#manifest-list) - mediaType is different - -### application/vnd.oci.image.manifest.v1+json - -**Similar/related schema** - -- [application/vnd.docker.distribution.manifest.v2+json](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md#image-manifest-field-descriptions) - -### application/vnd.oci.image.layer.v1.tar+gzip - -**Interchangeable and fully compatible mime-types** - -- [application/vnd.docker.image.rootfs.diff.tar.gzip](https://github.com/docker/docker/blob/master/image/spec/v1.md#creating-an-image-filesystem-changeset) - -### application/vnd.oci.image.config.v1+json - -**Similar/related schema** - -- [application/vnd.docker.container.image.v1+json](https://github.com/docker/docker/blob/master/image/spec/v1.md#image-json-description) - -## Relations - -The following figure shows how the above media types reference each other: - -![](img/media-types.png) - -[Descriptors](descriptor.md) are used for all references. -The image-index being a "fat manifest" references a list of image manifests per target platform. An image manifest references exactly one target configuration and possibly many layers. - -[rfc1952]: https://tools.ietf.org/html/rfc1952 diff --git a/vendor/github.com/opencontainers/image-spec/project.md b/vendor/github.com/opencontainers/image-spec/project.md deleted file mode 100644 index 440484b6..00000000 --- a/vendor/github.com/opencontainers/image-spec/project.md +++ /dev/null @@ -1,7 +0,0 @@ -# Project docs - -## Release Process - -* `git tag` the prior commit (preferably signed tag) -* Make a release on [github.com/opencontainers/image-spec](https://github.com/opencontainers/image-spec/releases) for the version. Attach the produced docs. - diff --git a/vendor/github.com/opencontainers/image-spec/spec.md b/vendor/github.com/opencontainers/image-spec/spec.md deleted file mode 100644 index bf847438..00000000 --- a/vendor/github.com/opencontainers/image-spec/spec.md +++ /dev/null @@ -1,69 +0,0 @@ -# Open Container Initiative -## Image Format Specification - -This specification defines an OCI Image, consisting of a [manifest](manifest.md), an [image index](image-index.md) (optional), a set of [filesystem layers](layer.md), and a [configuration](config.md). - -The goal of this specification is to enable the creation of interoperable tools for building, transporting, and preparing a container image to run. 
- -### Table of Contents - -- [Introduction](spec.md) -- [Notational Conventions](#notational-conventions) -- [Overview](#overview) - - [Understanding the Specification](#understanding-the-specification) - - [Media Types](media-types.md) -- [Content Descriptors](descriptor.md) -- [Image Layout](image-layout.md) -- [Image Manifest](manifest.md) -- [Image Index](image-index.md) -- [Filesystem Layers](layer.md) -- [Image Configuration](config.md) -- [Annotations](annotations.md) -- [Conversion](conversion.md) -- [Considerations](considerations.md) - - [Extensibility](considerations.md#extensibility) - - [Canonicalization](considerations.md#canonicalization) - -## Notational Conventions - -The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "NOT RECOMMENDED", "MAY", and "OPTIONAL" are to be interpreted as described in [RFC 2119](http://tools.ietf.org/html/rfc2119) (Bradner, S., "Key words for use in RFCs to Indicate Requirement Levels", BCP 14, RFC 2119, March 1997). - -The key words "unspecified", "undefined", and "implementation-defined" are to be interpreted as described in the [rationale for the C99 standard][c99-unspecified]. - -An implementation is not compliant if it fails to satisfy one or more of the MUST, MUST NOT, REQUIRED, SHALL, or SHALL NOT requirements for the protocols it implements. -An implementation is compliant if it satisfies all the MUST, MUST NOT, REQUIRED, SHALL, and SHALL NOT requirements for the protocols it implements. - -## Overview - -At a high level the image manifest contains metadata about the contents and dependencies of the image including the content-addressable identity of one or more [filesystem layer changeset](layer.md) archives that will be unpacked to make up the final runnable filesystem. -The image configuration includes information such as application arguments, environments, etc. -The image index is a higher-level manifest which points to a list of manifests and descriptors. -Typically, these manifests may provide different implementations of the image, possibly varying by platform or other attributes. - -![](img/build-diagram.png) - -Once built the OCI Image can then be discovered by name, downloaded, verified by hash, trusted through a signature, and unpacked into an [OCI Runtime Bundle](https://github.com/opencontainers/runtime-spec/blob/master/bundle.md). - -![](img/run-diagram.png) - -### Understanding the Specification - -The [OCI Image Media Types](media-types.md) document is a starting point to understanding the overall structure of the specification. 
- -The high-level components of the spec include: - -* [Image Manifest](manifest.md) - a document describing the components that make up a container image -* [Image Index](image-index.md) - an annotated index of image manifests -* [Image Layout](image-layout.md) - a filesystem layout representing the contents of an image -* [Filesystem Layer](layer.md) - a changeset that describes a container's filesystem -* [Image Configuration](config.md) - a document determining layer ordering and configuration of the image suitable for translation into a [runtime bundle][runtime-spec] -* [Conversion](conversion.md) - a document describing how this translation should occur -* [Descriptor](descriptor.md) - a reference that describes the type, metadata and content address of referenced content - -Future versions of this specification may include the following OPTIONAL features: - -* Signatures that are based on signing image content address -* Naming that is federated based on DNS and can be delegated - -[c99-unspecified]: http://www.open-std.org/jtc1/sc22/wg14/www/C99RationaleV5.10.pdf#page=18 -[runtime-spec]: https://github.com/opencontainers/runtime-spec diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/BUILD.bazel b/vendor/github.com/opencontainers/image-spec/specs-go/BUILD.bazel deleted file mode 100644 index d014850c..00000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/BUILD.bazel +++ /dev/null @@ -1,11 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "version.go", - "versioned.go", - ], - importpath = "github.com/opencontainers/image-spec/specs-go", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/BUILD.bazel b/vendor/github.com/opencontainers/image-spec/specs-go/v1/BUILD.bazel deleted file mode 100644 index 36b5e2db..00000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "annotations.go", - "config.go", - "descriptor.go", - "index.go", - "layout.go", - "manifest.go", - "mediatype.go", - ], - importpath = "github.com/opencontainers/image-spec/specs-go/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/opencontainers/go-digest:go_default_library", - "//vendor/github.com/opencontainers/image-spec/specs-go:go_default_library", - ], -) diff --git a/vendor/github.com/opencontainers/runc/.gitignore b/vendor/github.com/opencontainers/runc/.gitignore deleted file mode 100644 index 76c04941..00000000 --- a/vendor/github.com/opencontainers/runc/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -vendor/pkg -/runc -Godeps/_workspace/src/github.com/opencontainers/runc -man/man8 diff --git a/vendor/github.com/opencontainers/runc/BUILD.bazel b/vendor/github.com/opencontainers/runc/BUILD.bazel deleted file mode 100644 index 42fec42a..00000000 --- a/vendor/github.com/opencontainers/runc/BUILD.bazel +++ /dev/null @@ -1,85 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "delete.go", - "main.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "main_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "main_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "main_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": 
[ - "main_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "checkpoint.go", - "events.go", - "exec.go", - "kill.go", - "list.go", - "main_unix.go", - "pause.go", - "restore.go", - "rlimit_linux.go", - "signals.go", - "spec.go", - "start.go", - "state.go", - "tty.go", - "utils.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "main_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "main_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "main_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "main_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "main_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "main_unsupported.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/opencontainers/runc", - visibility = ["//visibility:private"], - deps = [ - "//vendor/github.com/Sirupsen/logrus:go_default_library", - "//vendor/github.com/codegangsta/cli:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer:go_default_library", - "//vendor/github.com/opencontainers/runtime-spec/specs-go:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/coreos/go-systemd/activation:go_default_library", - "//vendor/github.com/docker/docker/pkg/term:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/nsenter:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/specconv:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/system:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/utils:go_default_library", - ], - "//conditions:default": [], - }), -) - -go_binary( - name = "runc", - embed = [":go_default_library"], - importpath = "github.com/opencontainers/runc", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/opencontainers/runc/CONTRIBUTING.md b/vendor/github.com/opencontainers/runc/CONTRIBUTING.md deleted file mode 100644 index 6f341f60..00000000 --- a/vendor/github.com/opencontainers/runc/CONTRIBUTING.md +++ /dev/null @@ -1,117 +0,0 @@ -## Contribution Guidelines - -### Pull requests are always welcome - -We are always thrilled to receive pull requests, and do our best to -process them as fast as possible. Not sure if that typo is worth a pull -request? Do it! We will appreciate it. - -If your pull request is not accepted on the first try, don't be -discouraged! If there's a problem with the implementation, hopefully you -received feedback on what to improve. - -We're trying very hard to keep runc lean and focused. We don't want it -to do everything for everybody. This means that we might decide against -incorporating a new feature. However, there might be a way to implement -that feature *on top of* runc. - - -### Conventions - -Fork the repo and make changes on your fork in a feature branch: - -- If it's a bugfix branch, name it XXX-something where XXX is the number of the - issue -- If it's a feature branch, create an enhancement issue to announce your - intentions, and name it XXX-something where XXX is the number of the issue. - -Submit unit tests for your changes. Go has a great test framework built in; use -it! Take a look at existing tests for inspiration. 
Run the full test suite on -your branch before submitting a pull request. - -Update the documentation when creating or modifying features. Test -your documentation changes for clarity, concision, and correctness, as -well as a clean documentation build. See ``docs/README.md`` for more -information on building the docs and how docs get released. - -Write clean code. Universally formatted code promotes ease of writing, reading, -and maintenance. Always run `gofmt -s -w file.go` on each changed file before -committing your changes. Most editors have plugins that do this automatically. - -Pull requests descriptions should be as clear as possible and include a -reference to all the issues that they address. - -Pull requests must not contain commits from other users or branches. - -Commit messages must start with a capitalized and short summary (max. 50 -chars) written in the imperative, followed by an optional, more detailed -explanatory text which is separated from the summary by an empty line. - -Code review comments may be added to your pull request. Discuss, then make the -suggested modifications and push additional commits to your feature branch. Be -sure to post a comment after pushing. The new commits will show up in the pull -request automatically, but the reviewers will not be notified unless you -comment. - -Before the pull request is merged, make sure that you squash your commits into -logical units of work using `git rebase -i` and `git push -f`. After every -commit the test suite should be passing. Include documentation changes in the -same commit so that a revert would remove all traces of the feature or fix. - -Commits that fix or close an issue should include a reference like `Closes #XXX` -or `Fixes #XXX`, which will automatically close the issue when merged. - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the -patch, which certifies that you wrote it or otherwise have the right to -pass it on as an open-source patch. The rules are pretty simple: if you -can certify the below (from -[developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. 
- -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -using your real name (sorry, no pseudonyms or anonymous contributions.) - -You can add the sign off when creating the git commit via `git commit -s`. diff --git a/vendor/github.com/opencontainers/runc/Dockerfile b/vendor/github.com/opencontainers/runc/Dockerfile deleted file mode 100644 index a25c2210..00000000 --- a/vendor/github.com/opencontainers/runc/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM runc_test -ADD . /go/src/github.com/opencontainers/runc -RUN make diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE new file mode 100644 index 00000000..f090cb42 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE new file mode 100644 index 00000000..5515ccfb --- /dev/null +++ b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE @@ -0,0 +1,21 @@ +Copyright (C) 2013 Jeremy Saenz +All Rights Reserved. + +MIT LICENSE + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/go-systemd/LICENSE new file mode 100644 index 00000000..37ec93a1 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/go-systemd/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/LICENSE new file mode 100644 index 00000000..c7a3f0cf --- /dev/null +++ b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/NOTICE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/NOTICE new file mode 100644 index 00000000..6e6f469a --- /dev/null +++ b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/contrib/syntax/vim/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/contrib/syntax/vim/LICENSE new file mode 100644 index 00000000..e67cdabd --- /dev/null +++ b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/contrib/syntax/vim/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 Honza Pokorny +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/docs/project/images/red_notice.png b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/docs/project/images/red_notice.png new file mode 100644 index 00000000..8839723a Binary files /dev/null and b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/docs/project/images/red_notice.png differ diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE new file mode 100644 index 00000000..ac74d8f0 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE new file mode 100644 index 00000000..9e4bd4db --- /dev/null +++ b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2015 Docker, Inc. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD new file mode 100644 index 00000000..ac74d8f0 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD @@ -0,0 +1,27 @@ +Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/go-units/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/go-units/LICENSE new file mode 100644 index 00000000..b55b37bc --- /dev/null +++ b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/go-units/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/LICENSE new file mode 100644 index 00000000..670d88fc --- /dev/null +++ b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013, Georg Reinke (), Google +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. 
Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/LICENSE new file mode 100644 index 00000000..1b1b1921 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/LICENSE @@ -0,0 +1,31 @@ +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/opencontainers/runtime-spec/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/opencontainers/runtime-spec/LICENSE new file mode 100644 index 00000000..bdc40365 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/opencontainers/runtime-spec/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 The Linux Foundation. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/NOTICE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/NOTICE new file mode 100644 index 00000000..405a4961 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/NOTICE @@ -0,0 +1,8 @@ +ffjson +Copyright (c) 2014, Paul Querna + +This product includes software developed by +Paul Querna (http://paul.querna.org/). + +Portions of this software were developed as +part of Go, Copyright (c) 2012 The Go Authors. \ No newline at end of file diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/LICENSE new file mode 100644 index 00000000..81cf60de --- /dev/null +++ b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2015 Matthew Heon +Copyright (c) 2015 Paul Moore +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +- Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +- Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/syndtr/gocapability/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/syndtr/gocapability/LICENSE new file mode 100644 index 00000000..80dd96de --- /dev/null +++ b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/syndtr/gocapability/LICENSE @@ -0,0 +1,24 @@ +Copyright 2013 Suryandaru Triandana +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/LICENSE new file mode 100644 index 00000000..9f64db85 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/LICENSE @@ -0,0 +1,192 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Vishvananda Ishaya. + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/runc/MAINTAINERS b/vendor/github.com/opencontainers/runc/MAINTAINERS deleted file mode 100644 index d918b37c..00000000 --- a/vendor/github.com/opencontainers/runc/MAINTAINERS +++ /dev/null @@ -1,9 +0,0 @@ -Michael Crosby (@crosbymichael) -Rohit Jnagal (@rjnagal) -Victor Marmol (@vmarmol) -Mrunal Patel (@mrunalp) -Alexander Morozov (@LK4D4) -Daniel, Dao Quang Minh (@dqminh) -Andrey Vagin (@avagin) -Qiang Huang (@hqhq) -Aleksa Sarai (@cyphar) diff --git a/vendor/github.com/opencontainers/runc/MAINTAINERS_GUIDE.md b/vendor/github.com/opencontainers/runc/MAINTAINERS_GUIDE.md deleted file mode 100644 index caf27b54..00000000 --- a/vendor/github.com/opencontainers/runc/MAINTAINERS_GUIDE.md +++ /dev/null @@ -1,120 +0,0 @@ -## Introduction - -Dear maintainer. Thank you for investing the time and energy to help -make runc as useful as possible. Maintaining a project is difficult, -sometimes unrewarding work. Sure, you will get to contribute cool -features to the project. But most of your time will be spent reviewing, -cleaning up, documenting, answering questions, justifying design -decisions - while everyone has all the fun! But remember - the quality -of the maintainers work is what distinguishes the good projects from the -great. So please be proud of your work, even the unglamourous parts, -and encourage a culture of appreciation and respect for *every* aspect -of improving the project - not just the hot new features. - -This document is a manual for maintainers old and new. 
It explains what -is expected of maintainers, how they should work, and what tools are -available to them. - -This is a living document - if you see something out of date or missing, -speak up! - -## What are a maintainer's responsibility? - -It is every maintainer's responsibility to: - -* 1) Expose a clear roadmap for improving their component. -* 2) Deliver prompt feedback and decisions on pull requests. -* 3) Be available to anyone with questions, bug reports, criticism etc. - on their component. This includes IRC and GitHub issues and pull requests. -* 4) Make sure their component respects the philosophy, design and - roadmap of the project. - -## How are decisions made? - -Short answer: with pull requests to the runc repository. - -runc is an open-source project with an open design philosophy. This -means that the repository is the source of truth for EVERY aspect of the -project, including its philosophy, design, roadmap and APIs. *If it's -part of the project, it's in the repo. It's in the repo, it's part of -the project.* - -As a result, all decisions can be expressed as changes to the -repository. An implementation change is a change to the source code. An -API change is a change to the API specification. A philosophy change is -a change to the philosophy manifesto. And so on. - -All decisions affecting runc, big and small, follow the same 3 steps: - -* Step 1: Open a pull request. Anyone can do this. - -* Step 2: Discuss the pull request. Anyone can do this. - -* Step 3: Accept (`LGTM`) or refuse a pull request. The relevant maintainers do -this (see below "Who decides what?") - -### I'm a maintainer, should I make pull requests too? - -Yes. Nobody should ever push to master directly. All changes should be -made through a pull request. - -## Who decides what? - -All decisions are pull requests, and the relevant maintainers make -decisions by accepting or refusing the pull request. Review and acceptance -by anyone is denoted by adding a comment in the pull request: `LGTM`. -However, only currently listed `MAINTAINERS` are counted towards the required -two LGTMs. - -Overall the maintainer system works because of mutual respect across the -maintainers of the project. The maintainers trust one another to make decisions -in the best interests of the project. Sometimes maintainers can disagree and -this is part of a healthy project to represent the point of views of various people. -In the case where maintainers cannot find agreement on a specific change the -role of a Chief Maintainer comes into play. - -The Chief Maintainer for the project is responsible for overall architecture -of the project to maintain conceptual integrity. Large decisions and -architecture changes should be reviewed by the chief maintainer. -The current chief maintainer for the project is Michael Crosby (@crosbymichael). - -Even though the maintainer system is built on trust, if there is a conflict -with the chief maintainer on a decision, their decision can be challenged -and brought to the technical oversight board if two-thirds of the -maintainers vote for an appeal. It is expected that this would be a -very exceptional event. - - -### How are maintainers added? - -The best maintainers have a vested interest in the project. Maintainers -are first and foremost contributors that have shown they are committed to -the long term success of the project. 
Contributors wanting to become -maintainers are expected to be deeply involved in contributing code, -pull request review, and triage of issues in the project for more than two months. - -Just contributing does not make you a maintainer, it is about building trust -with the current maintainers of the project and being a person that they can -depend on and trust to make decisions in the best interest of the project. The -final vote to add a new maintainer should be approved by over 66% of the current -maintainers with the chief maintainer having veto power. In case of a veto, -conflict resolution rules expressed above apply. The voting period is -five business days on the Pull Request to add the new maintainer. - - -### What is expected of maintainers? - -Part of a healthy project is to have active maintainers to support the community -in contributions and perform tasks to keep the project running. Maintainers are -expected to be able to respond in a timely manner if their help is required on specific -issues where they are pinged. Being a maintainer is a time consuming commitment and should -not be taken lightly. - -When a maintainer is unable to perform the required duties they can be removed with -a vote by 66% of the current maintainers with the chief maintainer having veto power. -The voting period is ten business days. Issues related to a maintainer's performance should -be discussed with them among the other maintainers so that they are not surprised by -a pull request removing them. - - - diff --git a/vendor/github.com/opencontainers/runc/Makefile b/vendor/github.com/opencontainers/runc/Makefile deleted file mode 100644 index 5de10d0b..00000000 --- a/vendor/github.com/opencontainers/runc/Makefile +++ /dev/null @@ -1,57 +0,0 @@ -RUNC_IMAGE=runc_dev -RUNC_TEST_IMAGE=runc_test -PROJECT=github.com/opencontainers/runc -TEST_DOCKERFILE=script/test_Dockerfile -BUILDTAGS=seccomp -RUNC_BUILD_PATH=/go/src/github.com/opencontainers/runc/runc -RUNC_INSTANCE=runc_dev -COMMIT=$(shell git rev-parse HEAD 2> /dev/null || true) -RUNC_LINK=$(CURDIR)/Godeps/_workspace/src/github.com/opencontainers/runc -export GOPATH:=$(CURDIR)/Godeps/_workspace:$(GOPATH) - -.PHONY=dbuild - -all: -ifneq ($(RUNC_LINK), $(wildcard $(RUNC_LINK))) - ln -sfn $(CURDIR) $(RUNC_LINK) -endif - go build -ldflags "-X main.gitCommit=${COMMIT}" -tags "$(BUILDTAGS)" -o runc . - -static: - CGO_ENABLED=1 go build -tags "$(BUILDTAGS) cgo static_build" -ldflags "-w -extldflags -static -X main.gitCommit=${COMMIT}" -o runc . - -lint: - go vet ./... - go fmt ./... - -runctestimage: - docker build -t $(RUNC_TEST_IMAGE) -f $(TEST_DOCKERFILE) . - -test: runctestimage - docker run -e TESTFLAGS -ti --privileged --rm -v $(CURDIR):/go/src/$(PROJECT) $(RUNC_TEST_IMAGE) make localtest - tests/sniffTest - -localtest: all - go test -tags "$(BUILDTAGS)" ${TESTFLAGS} -v ./... - -dbuild: runctestimage - docker build -t $(RUNC_IMAGE) . - docker create --name=$(RUNC_INSTANCE) $(RUNC_IMAGE) - docker cp $(RUNC_INSTANCE):$(RUNC_BUILD_PATH) . - docker rm $(RUNC_INSTANCE) - -install: - install -D -m0755 runc /usr/local/sbin/runc - -uninstall: - rm -f /usr/local/sbin/runc - -clean: - rm -f runc - rm -f $(RUNC_LINK) - -validate: - script/validate-gofmt - go vet ./... 
- -ci: validate localtest diff --git a/vendor/github.com/opencontainers/runc/PRINCIPLES.md b/vendor/github.com/opencontainers/runc/PRINCIPLES.md deleted file mode 100644 index fdcc3738..00000000 --- a/vendor/github.com/opencontainers/runc/PRINCIPLES.md +++ /dev/null @@ -1,19 +0,0 @@ -# runc principles - -In the design and development of runc and libcontainer we try to follow these principles: - -(Work in progress) - -* Don't try to replace every tool. Instead, be an ingredient to improve them. -* Less code is better. -* Fewer components are better. Do you really need to add one more class? -* 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand. -* Don't do later what you can do now. "//TODO: refactor" is not acceptable in new code. -* When hesitating between two options, choose the one that is easier to reverse. -* "No" is temporary; "Yes" is forever. If you're not sure about a new feature, say no. You can change your mind later. -* Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable. -* The fewer moving parts in a container, the better. -* Don't merge it unless you document it. -* Don't document it unless you can keep it up-to-date. -* Don't merge it unless you test it! -* Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that. diff --git a/vendor/github.com/opencontainers/runc/README.md b/vendor/github.com/opencontainers/runc/README.md deleted file mode 100644 index 1e6e503a..00000000 --- a/vendor/github.com/opencontainers/runc/README.md +++ /dev/null @@ -1,144 +0,0 @@ -[![Build Status](https://jenkins.dockerproject.org/buildStatus/icon?job=runc Master)](https://jenkins.dockerproject.org/job/runc Master) - -## runc - -`runc` is a CLI tool for spawning and running containers according to the OCF specification. - -## State of the project - -Currently `runc` is an implementation of the OCI specification. We are currently sprinting -to have a v1 of the spec out. So the `runc` config format will be constantly changing until -the spec is finalized. However, we encourage you to try out the tool and give feedback. - -### OCF - -How does `runc` integrate with the Open Container Initiative Specification? -`runc` depends on the types specified in the -[specs](https://github.com/opencontainers/runtime-spec) repository. Whenever the -specification is updated and ready to be versioned `runc` will update its dependency -on the specs repository and support the update spec. - -### Building: - -At the time of writing, runc only builds on the Linux platform. - -```bash -# create a 'github.com/opencontainers' in your GOPATH/src -cd github.com/opencontainers -git clone https://github.com/opencontainers/runc -cd runc -make -sudo make install -``` - -In order to enable seccomp support you will need to install libseccomp on your platform. -If you do not want to build `runc` with seccomp support you can add `BUILDTAGS=""` when running make. - -#### Build Tags - -`runc` supports optional build tags for compiling in support for various features. 
- - -| Build Tag | Feature | Dependency | -|-----------|------------------------------------|-------------| -| seccomp | Syscall filtering | libseccomp | -| selinux | selinux process and mount labeling | | -| apparmor | apparmor profile support | libapparmor | - -### Testing: - -You can run tests for runC by using command: - -```bash -# make test -``` - -Note that test cases are run in Docker container, so you need to install -`docker` first. And test requires mounting cgroups inside container, it's -done by docker now, so you need a docker version newer than 1.8.0-rc2. - -You can also run specific test cases by: - -```bash -# make test TESTFLAGS="-run=SomeTestFunction" -``` - -### Using: - -To run a container with the id "test", execute `runc start` with the containers id as arg one -in the bundle's root directory: - -```bash -runc start test -/ $ ps -PID USER COMMAND -1 daemon sh -5 daemon sh -/ $ -``` - -### OCI Container JSON Format: - -OCI container JSON format is based on OCI [specs](https://github.com/opencontainers/runtime-spec). -You can generate JSON files by using `runc spec`. -It assumes that the file-system is found in a directory called -`rootfs` and there is a user with uid and gid of `0` defined within that file-system. - -### Examples: - -#### Using a Docker image (requires version 1.3 or later) - -To test using Docker's `busybox` image follow these steps: -* Install `docker` and download the `busybox` image: `docker pull busybox` -* Create a container from that image and export its contents to a tar file: -`docker export $(docker create busybox) > busybox.tar` -* Untar the contents to create your filesystem directory: -``` -mkdir rootfs -tar -C rootfs -xf busybox.tar -``` -* Create `config.json` by using `runc spec`. -* Execute `runc start` and you should be placed into a shell where you can run `ps`: -``` -$ runc start test -/ # ps -PID USER COMMAND - 1 root sh - 9 root ps -``` - -#### Using runc with systemd - -To use runc with systemd, you can create a unit file -`/usr/lib/systemd/system/minecraft.service` as below (edit your -own Description or WorkingDirectory or service name as you need). - -```service -[Unit] -Description=Minecraft Build Server -Documentation=http://minecraft.net -After=network.target - -[Service] -CPUQuota=200% -MemoryLimit=1536M -ExecStart=/usr/local/bin/runc start minecraft -Restart=on-failure -WorkingDirectory=/containers/minecraftbuild - -[Install] -WantedBy=multi-user.target -``` - -Make sure you have the bundle's root directory and JSON configs in -your WorkingDirectory, then use systemd commands to start the service: - -```bash -systemctl daemon-reload -systemctl start minecraft.service -``` - -Note that if you use JSON configs by `runc spec`, you need to modify -`config.json` and change `process.terminal` to false so runc won't -create tty, because we can't set terminal from the stdin when using -systemd service. 
diff --git a/vendor/github.com/opencontainers/runc/checkpoint.go b/vendor/github.com/opencontainers/runc/checkpoint.go deleted file mode 100644 index 81de35a8..00000000 --- a/vendor/github.com/opencontainers/runc/checkpoint.go +++ /dev/null @@ -1,89 +0,0 @@ -// +build linux - -package main - -import ( - "fmt" - "strconv" - "strings" - - "github.com/codegangsta/cli" - "github.com/opencontainers/runc/libcontainer" -) - -var checkpointCommand = cli.Command{ - Name: "checkpoint", - Usage: "checkpoint a running container", - ArgsUsage: ` - -Where "" is the name for the instance of the container to be -checkpointed.`, - Description: `The checkpoint command saves the state of the container instance.`, - Flags: []cli.Flag{ - cli.StringFlag{Name: "image-path", Value: "", Usage: "path for saving criu image files"}, - cli.StringFlag{Name: "work-path", Value: "", Usage: "path for saving work files and logs"}, - cli.BoolFlag{Name: "leave-running", Usage: "leave the process running after checkpointing"}, - cli.BoolFlag{Name: "tcp-established", Usage: "allow open tcp connections"}, - cli.BoolFlag{Name: "ext-unix-sk", Usage: "allow external unix sockets"}, - cli.BoolFlag{Name: "shell-job", Usage: "allow shell jobs"}, - cli.StringFlag{Name: "page-server", Value: "", Usage: "ADDRESS:PORT of the page server"}, - cli.BoolFlag{Name: "file-locks", Usage: "handle file locks, for safety"}, - cli.StringFlag{Name: "manage-cgroups-mode", Value: "", Usage: "cgroups mode: 'soft' (default), 'full' and 'strict'."}, - }, - Action: func(context *cli.Context) { - container, err := getContainer(context) - if err != nil { - fatal(err) - } - defer destroy(container) - options := criuOptions(context) - // these are the mandatory criu options for a container - setPageServer(context, options) - setManageCgroupsMode(context, options) - if err := container.Checkpoint(options); err != nil { - fatal(err) - } - }, -} - -func getCheckpointImagePath(context *cli.Context) string { - imagePath := context.String("image-path") - if imagePath == "" { - imagePath = getDefaultImagePath(context) - } - return imagePath -} - -func setPageServer(context *cli.Context, options *libcontainer.CriuOpts) { - // xxx following criu opts are optional - // The dump image can be sent to a criu page server - if psOpt := context.String("page-server"); psOpt != "" { - addressPort := strings.Split(psOpt, ":") - if len(addressPort) != 2 { - fatal(fmt.Errorf("Use --page-server ADDRESS:PORT to specify page server")) - } - portInt, err := strconv.Atoi(addressPort[1]) - if err != nil { - fatal(fmt.Errorf("Invalid port number")) - } - options.PageServer = libcontainer.CriuPageServerInfo{ - Address: addressPort[0], - Port: int32(portInt), - } - } -} - -func setManageCgroupsMode(context *cli.Context, options *libcontainer.CriuOpts) { - if cgOpt := context.String("manage-cgroups-mode"); cgOpt != "" { - switch cgOpt { - case "soft": - options.ManageCgroupsMode = libcontainer.CRIU_CG_MODE_SOFT - case "full": - options.ManageCgroupsMode = libcontainer.CRIU_CG_MODE_FULL - case "strict": - options.ManageCgroupsMode = libcontainer.CRIU_CG_MODE_STRICT - default: - fatal(fmt.Errorf("Invalid manage cgroups mode")) - } - } -} diff --git a/vendor/github.com/opencontainers/runc/delete.go b/vendor/github.com/opencontainers/runc/delete.go deleted file mode 100644 index 3468d106..00000000 --- a/vendor/github.com/opencontainers/runc/delete.go +++ /dev/null @@ -1,38 +0,0 @@ -package main - -import ( - "os" - "path/filepath" - - "github.com/codegangsta/cli" - 
"github.com/opencontainers/runc/libcontainer" -) - -var deleteCommand = cli.Command{ - Name: "delete", - Usage: "delete any resources held by the container often used with detached containers", - ArgsUsage: ` - -Where "" is the name for the instance of the container. - -For example, if the container id is "ubuntu01" and runc list currently shows the -status of "ubuntu01" as "destroyed" the following will delete resources held for -"ubuntu01" removing "ubuntu01" from the runc list of containers: - - # runc delete ubuntu01`, - Action: func(context *cli.Context) { - container, err := getContainer(context) - if err != nil { - if lerr, ok := err.(libcontainer.Error); ok && lerr.Code() == libcontainer.ContainerNotExists { - // if there was an aborted start or something of the sort then the container's directory could exist but - // libcontainer does not see it because the state.json file inside that directory was never created. - path := filepath.Join(context.GlobalString("root"), context.Args().First()) - if err := os.RemoveAll(path); err == nil { - return - } - } - fatal(err) - } - destroy(container) - }, -} diff --git a/vendor/github.com/opencontainers/runc/events.go b/vendor/github.com/opencontainers/runc/events.go deleted file mode 100644 index 777ecea2..00000000 --- a/vendor/github.com/opencontainers/runc/events.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build linux - -package main - -import ( - "encoding/json" - "os" - "sync" - "time" - - "github.com/Sirupsen/logrus" - "github.com/codegangsta/cli" - "github.com/opencontainers/runc/libcontainer" -) - -// event struct for encoding the event data to json. -type event struct { - Type string `json:"type"` - ID string `json:"id"` - Data interface{} `json:"data,omitempty"` -} - -var eventsCommand = cli.Command{ - Name: "events", - Usage: "display container events such as OOM notifications, cpu, memory, IO and network stats", - ArgsUsage: ` - -Where "" is the name for the instance of the container.`, - Description: `The events command displays information about the container. By default the -information is displayed once every 5 seconds.`, - Flags: []cli.Flag{ - cli.DurationFlag{Name: "interval", Value: 5 * time.Second, Usage: "set the stats collection interval"}, - cli.BoolFlag{Name: "stats", Usage: "display the container's stats then exit"}, - }, - Action: func(context *cli.Context) { - container, err := getContainer(context) - if err != nil { - fatal(err) - } - var ( - stats = make(chan *libcontainer.Stats, 1) - events = make(chan *event, 1024) - group = &sync.WaitGroup{} - ) - group.Add(1) - go func() { - defer group.Done() - enc := json.NewEncoder(os.Stdout) - for e := range events { - if err := enc.Encode(e); err != nil { - logrus.Error(err) - } - } - }() - if context.Bool("stats") { - s, err := container.Stats() - if err != nil { - fatal(err) - } - events <- &event{Type: "stats", ID: container.ID(), Data: s} - close(events) - group.Wait() - return - } - go func() { - for range time.Tick(context.Duration("interval")) { - s, err := container.Stats() - if err != nil { - logrus.Error(err) - continue - } - stats <- s - } - }() - n, err := container.NotifyOOM() - if err != nil { - fatal(err) - } - for { - select { - case _, ok := <-n: - if ok { - // this means an oom event was received, if it is !ok then - // the channel was closed because the container stopped and - // the cgroups no longer exist. 
- events <- &event{Type: "oom", ID: container.ID()} - } else { - n = nil - } - case s := <-stats: - events <- &event{Type: "stats", ID: container.ID(), Data: s} - } - if n == nil { - close(events) - break - } - } - group.Wait() - }, -} diff --git a/vendor/github.com/opencontainers/runc/exec.go b/vendor/github.com/opencontainers/runc/exec.go deleted file mode 100644 index 76525886..00000000 --- a/vendor/github.com/opencontainers/runc/exec.go +++ /dev/null @@ -1,187 +0,0 @@ -// +build linux - -package main - -import ( - "encoding/json" - "fmt" - "os" - "strconv" - "strings" - - "github.com/codegangsta/cli" - "github.com/opencontainers/runc/libcontainer/utils" - "github.com/opencontainers/runtime-spec/specs-go" -) - -var execCommand = cli.Command{ - Name: "exec", - Usage: "execute new process inside the container", - ArgsUsage: ` - -Where "" is the name for the instance of the container and -"" is the command to be executed in the container. - -For example, if the container is configured to run the linux ps command the -following will output a list of processes running in the container: - - # runc exec ps`, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "console", - Usage: "specify the pty slave path for use with the container", - }, - cli.StringFlag{ - Name: "cwd", - Usage: "current working directory in the container", - }, - cli.StringSliceFlag{ - Name: "env, e", - Usage: "set environment variables", - }, - cli.BoolFlag{ - Name: "tty, t", - Usage: "allocate a pseudo-TTY", - }, - cli.StringFlag{ - Name: "user, u", - Usage: "UID (format: [:])", - }, - cli.StringFlag{ - Name: "process, p", - Usage: "path to the process.json", - }, - cli.BoolFlag{ - Name: "detach,d", - Usage: "detach from the container's process", - }, - cli.StringFlag{ - Name: "pid-file", - Value: "", - Usage: "specify the file to write the process id to", - }, - cli.StringFlag{ - Name: "process-label", - Usage: "set the asm process label for the process commonly used with selinux", - }, - cli.StringFlag{ - Name: "apparmor", - Usage: "set the apparmor profile for the process", - }, - cli.BoolFlag{ - Name: "no-new-privs", - Usage: "set the no new privileges value for the process", - }, - cli.StringSliceFlag{ - Name: "cap, c", - Value: &cli.StringSlice{}, - Usage: "add a capability to the bounding set for the process", - }, - cli.BoolFlag{ - Name: "no-subreaper", - Usage: "disable the use of the subreaper used to reap reparented processes", - }, - }, - Action: func(context *cli.Context) { - if os.Geteuid() != 0 { - fatalf("runc should be run as root") - } - status, err := execProcess(context) - if err != nil { - fatalf("exec failed: %v", err) - } - os.Exit(status) - }, -} - -func execProcess(context *cli.Context) (int, error) { - container, err := getContainer(context) - if err != nil { - return -1, err - } - detach := context.Bool("detach") - state, err := container.State() - if err != nil { - return -1, err - } - bundle := utils.SearchLabels(state.Config.Labels, "bundle") - p, err := getProcess(context, bundle) - if err != nil { - return -1, err - } - r := &runner{ - enableSubreaper: !context.Bool("no-subreaper"), - shouldDestroy: false, - container: container, - console: context.String("console"), - detach: detach, - pidFile: context.String("pid-file"), - } - return r.run(p) -} - -func getProcess(context *cli.Context, bundle string) (*specs.Process, error) { - if path := context.String("process"); path != "" { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - var p specs.Process - if err := 
json.NewDecoder(f).Decode(&p); err != nil { - return nil, err - } - return &p, validateProcessSpec(&p) - } - // process via cli flags - if err := os.Chdir(bundle); err != nil { - return nil, err - } - spec, err := loadSpec(specConfig) - if err != nil { - return nil, err - } - p := spec.Process - p.Args = context.Args()[1:] - // override the cwd, if passed - if context.String("cwd") != "" { - p.Cwd = context.String("cwd") - } - if ap := context.String("apparmor"); ap != "" { - p.ApparmorProfile = ap - } - if l := context.String("process-label"); l != "" { - p.SelinuxLabel = l - } - if caps := context.StringSlice("cap"); len(caps) > 0 { - p.Capabilities = caps - } - // append the passed env variables - for _, e := range context.StringSlice("env") { - p.Env = append(p.Env, e) - } - // set the tty - if context.IsSet("tty") { - p.Terminal = context.Bool("tty") - } - if context.IsSet("no-new-privs") { - p.NoNewPrivileges = context.Bool("no-new-privs") - } - // override the user, if passed - if context.String("user") != "" { - u := strings.SplitN(context.String("user"), ":", 2) - if len(u) > 1 { - gid, err := strconv.Atoi(u[1]) - if err != nil { - return nil, fmt.Errorf("parsing %s as int for gid failed: %v", u[1], err) - } - p.User.GID = uint32(gid) - } - uid, err := strconv.Atoi(u[0]) - if err != nil { - return nil, fmt.Errorf("parsing %s as int for uid failed: %v", u[0], err) - } - p.User.UID = uint32(uid) - } - return &p, nil -} diff --git a/vendor/github.com/opencontainers/runc/kill.go b/vendor/github.com/opencontainers/runc/kill.go deleted file mode 100644 index 3da8aaf6..00000000 --- a/vendor/github.com/opencontainers/runc/kill.go +++ /dev/null @@ -1,96 +0,0 @@ -// +build linux - -package main - -import ( - "fmt" - "strconv" - "strings" - "syscall" - - "github.com/codegangsta/cli" -) - -var signalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUS": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CLD": syscall.SIGCLD, - "CONT": syscall.SIGCONT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "PIPE": syscall.SIGPIPE, - "POLL": syscall.SIGPOLL, - "PROF": syscall.SIGPROF, - "PWR": syscall.SIGPWR, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STKFLT": syscall.SIGSTKFLT, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "UNUSED": syscall.SIGUNUSED, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, - "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, -} - -var killCommand = cli.Command{ - Name: "kill", - Usage: "kill sends the specified signal (default: SIGTERM) to the container's init process", - ArgsUsage: ` - -Where "" is the name for the instance of the container and -"" is the signal to be sent to the init process. 
- -For example, if the container id is "ubuntu01" the following will send a "KILL" -signal to the init process of the "ubuntu01" container: - - # runc kill ubuntu01 KILL`, - Action: func(context *cli.Context) { - container, err := getContainer(context) - if err != nil { - fatal(err) - } - - sigstr := context.Args().Get(1) - if sigstr == "" { - sigstr = "SIGTERM" - } - - signal, err := parseSignal(sigstr) - if err != nil { - fatal(err) - } - - if err := container.Signal(signal); err != nil { - fatal(err) - } - }, -} - -func parseSignal(rawSignal string) (syscall.Signal, error) { - s, err := strconv.Atoi(rawSignal) - if err == nil { - return syscall.Signal(s), nil - } - signal, ok := signalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] - if !ok { - return -1, fmt.Errorf("unknown signal %q", rawSignal) - } - return signal, nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/BUILD.bazel b/vendor/github.com/opencontainers/runc/libcontainer/BUILD.bazel deleted file mode 100644 index e7163517..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/BUILD.bazel +++ /dev/null @@ -1,104 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "console.go", - "container.go", - "error.go", - "factory.go", - "generic_error.go", - "process.go", - "stats.go", - ] + select({ - "@io_bazel_rules_go//go/platform:freebsd": [ - "console_freebsd.go", - "criu_opts_unix.go", - "stats_freebsd.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "capabilities_linux.go", - "compat_1.5_linux.go", - "console_linux.go", - "container_linux.go", - "criu_opts_unix.go", - "factory_linux.go", - "init_linux.go", - "message_linux.go", - "network_linux.go", - "notify_linux.go", - "process_linux.go", - "restored_process.go", - "rootfs_linux.go", - "setgroups_linux.go", - "setns_init_linux.go", - "standard_init_linux.go", - "state_linux.go", - "stats_linux.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "console_windows.go", - "container_windows.go", - "criu_opts_windows.go", - "stats_windows.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/opencontainers/runc/libcontainer", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/stacktrace:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/Sirupsen/logrus:go_default_library", - "//vendor/github.com/docker/docker/pkg/mount:go_default_library", - "//vendor/github.com/docker/docker/pkg/symlink:go_default_library", - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/apparmor:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/configs/validate:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/criurpc:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/keys:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/label:go_default_library", - 
"//vendor/github.com/opencontainers/runc/libcontainer/seccomp:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/system:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/utils:go_default_library", - "//vendor/github.com/syndtr/gocapability/capability:go_default_library", - "//vendor/github.com/vishvananda/netlink:go_default_library", - "//vendor/github.com/vishvananda/netlink/nl:go_default_library", - ], - "//conditions:default": [], - }), -) - -go_test( - name = "go_default_test", - srcs = [ - "error_test.go", - "generic_error_test.go", - ] + select({ - "@io_bazel_rules_go//go/platform:linux": [ - "container_linux_test.go", - "factory_linux_test.go", - "notify_linux_test.go", - "rootfs_linux_test.go", - "state_linux_test.go", - ], - "//conditions:default": [], - }), - embed = [":go_default_library"], - importpath = "github.com/opencontainers/runc/libcontainer", - deps = select({ - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/docker/docker/pkg/mount:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/utils:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/README.md b/vendor/github.com/opencontainers/runc/libcontainer/README.md deleted file mode 100644 index 61496946..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/README.md +++ /dev/null @@ -1,241 +0,0 @@ -Libcontainer provides a native Go implementation for creating containers -with namespaces, cgroups, capabilities, and filesystem access controls. -It allows you to manage the lifecycle of the container performing additional operations -after the container is created. - - -#### Container -A container is a self contained execution environment that shares the kernel of the -host system and which is (optionally) isolated from other containers in the system. - -#### Using libcontainer - -Because containers are spawned in a two step process you will need a binary that -will be executed as the init process for the container. In libcontainer, we use -the current binary (/proc/self/exe) to be executed as the init process, and use -arg "init", we call the first step process "bootstrap", so you always need a "init" -function as the entry of "bootstrap". - -```go -func init() { - if len(os.Args) > 1 && os.Args[1] == "init" { - runtime.GOMAXPROCS(1) - runtime.LockOSThread() - factory, _ := libcontainer.New("") - if err := factory.StartInitialization(); err != nil { - logrus.Fatal(err) - } - panic("--this line should have never been executed, congratulations--") - } -} -``` - -Then to create a container you first have to initialize an instance of a factory -that will handle the creation and initialization for a container. - -```go -factory, err := libcontainer.New("/var/lib/container", libcontainer.Cgroupfs, libcontainer.InitArgs(os.Args[0], "init")) -if err != nil { - logrus.Fatal(err) - return -} -``` - -Once you have an instance of the factory created we can create a configuration -struct describing how the container is to be created. 
A sample would look similar to this: - -```go -defaultMountFlags := syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV -config := &configs.Config{ - Rootfs: "/your/path/to/rootfs", - Capabilities: []string{ - "CAP_CHOWN", - "CAP_DAC_OVERRIDE", - "CAP_FSETID", - "CAP_FOWNER", - "CAP_MKNOD", - "CAP_NET_RAW", - "CAP_SETGID", - "CAP_SETUID", - "CAP_SETFCAP", - "CAP_SETPCAP", - "CAP_NET_BIND_SERVICE", - "CAP_SYS_CHROOT", - "CAP_KILL", - "CAP_AUDIT_WRITE", - }, - Namespaces: configs.Namespaces([]configs.Namespace{ - {Type: configs.NEWNS}, - {Type: configs.NEWUTS}, - {Type: configs.NEWIPC}, - {Type: configs.NEWPID}, - {Type: configs.NEWUSER}, - {Type: configs.NEWNET}, - }), - Cgroups: &configs.Cgroup{ - Name: "test-container", - Parent: "system", - Resources: &configs.Resources{ - MemorySwappiness: nil, - AllowAllDevices: false, - AllowedDevices: configs.DefaultAllowedDevices, - }, - }, - MaskPaths: []string{ - "/proc/kcore", - }, - ReadonlyPaths: []string{ - "/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus", - }, - Devices: configs.DefaultAutoCreatedDevices, - Hostname: "testing", - Mounts: []*configs.Mount{ - { - Source: "proc", - Destination: "/proc", - Device: "proc", - Flags: defaultMountFlags, - }, - { - Source: "tmpfs", - Destination: "/dev", - Device: "tmpfs", - Flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, - Data: "mode=755", - }, - { - Source: "devpts", - Destination: "/dev/pts", - Device: "devpts", - Flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, - Data: "newinstance,ptmxmode=0666,mode=0620,gid=5", - }, - { - Device: "tmpfs", - Source: "shm", - Destination: "/dev/shm", - Data: "mode=1777,size=65536k", - Flags: defaultMountFlags, - }, - { - Source: "mqueue", - Destination: "/dev/mqueue", - Device: "mqueue", - Flags: defaultMountFlags, - }, - { - Source: "sysfs", - Destination: "/sys", - Device: "sysfs", - Flags: defaultMountFlags | syscall.MS_RDONLY, - }, - }, - UidMappings: []configs.IDMap{ - { - ContainerID: 0, - HostID: 1000, - Size: 65536, - }, - }, - GidMappings: []configs.IDMap{ - { - ContainerID: 0, - HostID: 1000, - Size: 65536, - }, - }, - Networks: []*configs.Network{ - { - Type: "loopback", - Address: "127.0.0.1/0", - Gateway: "localhost", - }, - }, - Rlimits: []configs.Rlimit{ - { - Type: syscall.RLIMIT_NOFILE, - Hard: uint64(1025), - Soft: uint64(1025), - }, - }, -} -``` - -Once you have the configuration populated you can create a container: - -```go -container, err := factory.Create("container-id", config) -if err != nil { - logrus.Fatal(err) - return -} -``` - -To spawn bash as the initial process inside the container and have the -processes pid returned in order to wait, signal, or kill the process: - -```go -process := &libcontainer.Process{ - Args: []string{"/bin/bash"}, - Env: []string{"PATH=/bin"}, - User: "daemon", - Stdin: os.Stdin, - Stdout: os.Stdout, - Stderr: os.Stderr, -} - -err := container.Start(process) -if err != nil { - logrus.Fatal(err) - container.Destroy() - return -} - -// wait for the process to finish. -_, err := process.Wait() -if err != nil { - logrus.Fatal(err) -} - -// destroy the container. -container.Destroy() -``` - -Additional ways to interact with a running container are: - -```go -// return all the pids for all processes running inside the container. -processes, err := container.Processes() - -// get detailed cpu, memory, io, and network statistics for the container and -// it's processes. -stats, err := container.Stats() - -// pause all processes inside the container. 
-container.Pause() - -// resume all paused processes. -container.Resume() - -// send signal to container's init process. -container.Signal(signal) -``` - - -#### Checkpoint & Restore - -libcontainer now integrates [CRIU](http://criu.org/) for checkpointing and restoring containers. -This let's you save the state of a process running inside a container to disk, and then restore -that state into a new process, on the same machine or on another machine. - -`criu` version 1.5.2 or higher is required to use checkpoint and restore. -If you don't already have `criu` installed, you can build it from source, following the -[online instructions](http://criu.org/Installation). `criu` is also installed in the docker image -generated when building libcontainer with docker. - - -## Copyright and license - -Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license. -Docs released under Creative commons. - diff --git a/vendor/github.com/opencontainers/runc/libcontainer/SPEC.md b/vendor/github.com/opencontainers/runc/libcontainer/SPEC.md deleted file mode 100644 index 221545c0..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/SPEC.md +++ /dev/null @@ -1,335 +0,0 @@ -## Container Specification - v1 - -This is the standard configuration for version 1 containers. It includes -namespaces, standard filesystem setup, a default Linux capability set, and -information about resource reservations. It also has information about any -populated environment settings for the processes running inside a container. - -Along with the configuration of how a container is created the standard also -discusses actions that can be performed on a container to manage and inspect -information about the processes running inside. - -The v1 profile is meant to be able to accommodate the majority of applications -with a strong security configuration. - -### System Requirements and Compatibility - -Minimum requirements: -* Kernel version - 3.10 recommended 2.6.2x minimum(with backported patches) -* Mounted cgroups with each subsystem in its own hierarchy - - -### Namespaces - -| Flag | Enabled | -| ------------ | ------- | -| CLONE_NEWPID | 1 | -| CLONE_NEWUTS | 1 | -| CLONE_NEWIPC | 1 | -| CLONE_NEWNET | 1 | -| CLONE_NEWNS | 1 | -| CLONE_NEWUSER | 1 | - -Namespaces are created for the container via the `clone` syscall. - - -### Filesystem - -A root filesystem must be provided to a container for execution. The container -will use this root filesystem (rootfs) to jail and spawn processes inside where -the binaries and system libraries are local to that directory. Any binaries -to be executed must be contained within this rootfs. - -Mounts that happen inside the container are automatically cleaned up when the -container exits as the mount namespace is destroyed and the kernel will -unmount all the mounts that were setup within that namespace. - -For a container to execute properly there are certain filesystems that -are required to be mounted within the rootfs that the runtime will setup. 
- -| Path | Type | Flags | Data | -| ----------- | ------ | -------------------------------------- | ---------------------------------------- | -| /proc | proc | MS_NOEXEC,MS_NOSUID,MS_NODEV | | -| /dev | tmpfs | MS_NOEXEC,MS_STRICTATIME | mode=755 | -| /dev/shm | tmpfs | MS_NOEXEC,MS_NOSUID,MS_NODEV | mode=1777,size=65536k | -| /dev/mqueue | mqueue | MS_NOEXEC,MS_NOSUID,MS_NODEV | | -| /dev/pts | devpts | MS_NOEXEC,MS_NOSUID | newinstance,ptmxmode=0666,mode=620,gid=5 | -| /sys | sysfs | MS_NOEXEC,MS_NOSUID,MS_NODEV,MS_RDONLY | | - - -After a container's filesystems are mounted within the newly created -mount namespace `/dev` will need to be populated with a set of device nodes. -It is expected that a rootfs does not need to have any device nodes specified -for `/dev` within the rootfs as the container will setup the correct devices -that are required for executing a container's process. - -| Path | Mode | Access | -| ------------ | ---- | ---------- | -| /dev/null | 0666 | rwm | -| /dev/zero | 0666 | rwm | -| /dev/full | 0666 | rwm | -| /dev/tty | 0666 | rwm | -| /dev/random | 0666 | rwm | -| /dev/urandom | 0666 | rwm | -| /dev/fuse | 0666 | rwm | - - -**ptmx** -`/dev/ptmx` will need to be a symlink to the host's `/dev/ptmx` within -the container. - -The use of a pseudo TTY is optional within a container and it should support both. -If a pseudo is provided to the container `/dev/console` will need to be -setup by binding the console in `/dev/` after it has been populated and mounted -in tmpfs. - -| Source | Destination | UID GID | Mode | Type | -| --------------- | ------------ | ------- | ---- | ---- | -| *pty host path* | /dev/console | 0 0 | 0600 | bind | - - -After `/dev/null` has been setup we check for any external links between -the container's io, STDIN, STDOUT, STDERR. If the container's io is pointing -to `/dev/null` outside the container we close and `dup2` the the `/dev/null` -that is local to the container's rootfs. - - -After the container has `/proc` mounted a few standard symlinks are setup -within `/dev/` for the io. - -| Source | Destination | -| --------------- | ----------- | -| /proc/self/fd | /dev/fd | -| /proc/self/fd/0 | /dev/stdin | -| /proc/self/fd/1 | /dev/stdout | -| /proc/self/fd/2 | /dev/stderr | - -A `pivot_root` is used to change the root for the process, effectively -jailing the process inside the rootfs. - -```c -put_old = mkdir(...); -pivot_root(rootfs, put_old); -chdir("/"); -unmount(put_old, MS_DETACH); -rmdir(put_old); -``` - -For container's running with a rootfs inside `ramfs` a `MS_MOVE` combined -with a `chroot` is required as `pivot_root` is not supported in `ramfs`. - -```c -mount(rootfs, "/", NULL, MS_MOVE, NULL); -chroot("."); -chdir("/"); -``` - -The `umask` is set back to `0022` after the filesystem setup has been completed. - -### Resources - -Cgroups are used to handle resource allocation for containers. This includes -system resources like cpu, memory, and device access. - -| Subsystem | Enabled | -| ---------- | ------- | -| devices | 1 | -| memory | 1 | -| cpu | 1 | -| cpuacct | 1 | -| cpuset | 1 | -| blkio | 1 | -| perf_event | 1 | -| freezer | 1 | -| hugetlb | 1 | -| pids | 1 | - - -All cgroup subsystem are joined so that statistics can be collected from -each of the subsystems. Freezer does not expose any stats but is joined -so that containers can be paused and resumed. - -The parent process of the container's init must place the init pid inside -the correct cgroups before the initialization begins. 
This is done so -that no processes or threads escape the cgroups. This sync is -done via a pipe ( specified in the runtime section below ) that the container's -init process will block waiting for the parent to finish setup. - -### Security - -The standard set of Linux capabilities that are set in a container -provide a good default for security and flexibility for the applications. - - -| Capability | Enabled | -| -------------------- | ------- | -| CAP_NET_RAW | 1 | -| CAP_NET_BIND_SERVICE | 1 | -| CAP_AUDIT_READ | 1 | -| CAP_AUDIT_WRITE | 1 | -| CAP_DAC_OVERRIDE | 1 | -| CAP_SETFCAP | 1 | -| CAP_SETPCAP | 1 | -| CAP_SETGID | 1 | -| CAP_SETUID | 1 | -| CAP_MKNOD | 1 | -| CAP_CHOWN | 1 | -| CAP_FOWNER | 1 | -| CAP_FSETID | 1 | -| CAP_KILL | 1 | -| CAP_SYS_CHROOT | 1 | -| CAP_NET_BROADCAST | 0 | -| CAP_SYS_MODULE | 0 | -| CAP_SYS_RAWIO | 0 | -| CAP_SYS_PACCT | 0 | -| CAP_SYS_ADMIN | 0 | -| CAP_SYS_NICE | 0 | -| CAP_SYS_RESOURCE | 0 | -| CAP_SYS_TIME | 0 | -| CAP_SYS_TTY_CONFIG | 0 | -| CAP_AUDIT_CONTROL | 0 | -| CAP_MAC_OVERRIDE | 0 | -| CAP_MAC_ADMIN | 0 | -| CAP_NET_ADMIN | 0 | -| CAP_SYSLOG | 0 | -| CAP_DAC_READ_SEARCH | 0 | -| CAP_LINUX_IMMUTABLE | 0 | -| CAP_IPC_LOCK | 0 | -| CAP_IPC_OWNER | 0 | -| CAP_SYS_PTRACE | 0 | -| CAP_SYS_BOOT | 0 | -| CAP_LEASE | 0 | -| CAP_WAKE_ALARM | 0 | -| CAP_BLOCK_SUSPEND | 0 | - - -Additional security layers like [apparmor](https://wiki.ubuntu.com/AppArmor) -and [selinux](http://selinuxproject.org/page/Main_Page) can be used with -the containers. A container should support setting an apparmor profile or -selinux process and mount labels if provided in the configuration. - -Standard apparmor profile: -```c -#include -profile flags=(attach_disconnected,mediate_deleted) { - #include - network, - capability, - file, - umount, - - deny @{PROC}/sys/fs/** wklx, - deny @{PROC}/sysrq-trigger rwklx, - deny @{PROC}/mem rwklx, - deny @{PROC}/kmem rwklx, - deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx, - deny @{PROC}/sys/kernel/*/** wklx, - - deny mount, - - deny /sys/[^f]*/** wklx, - deny /sys/f[^s]*/** wklx, - deny /sys/fs/[^c]*/** wklx, - deny /sys/fs/c[^g]*/** wklx, - deny /sys/fs/cg[^r]*/** wklx, - deny /sys/firmware/efi/efivars/** rwklx, - deny /sys/kernel/security/** rwklx, -} -``` - -*TODO: seccomp work is being done to find a good default config* - -### Runtime and Init Process - -During container creation the parent process needs to talk to the container's init -process and have a form of synchronization. This is accomplished by creating -a pipe that is passed to the container's init. When the init process first spawns -it will block on its side of the pipe until the parent closes its side. This -allows the parent to have time to set the new process inside a cgroup hierarchy -and/or write any uid/gid mappings required for user namespaces. -The pipe is passed to the init process via FD 3. - -The application consuming libcontainer should be compiled statically. libcontainer -does not define any init process and the arguments provided are used to `exec` the -process inside the application. There should be no long running init within the -container spec. - -If a pseudo tty is provided to a container it will open and `dup2` the console -as the container's STDIN, STDOUT, STDERR as well as mounting the console -as `/dev/console`. - -An extra set of mounts are provided to a container and setup for use. A container's -rootfs can contain some non portable files inside that can cause side effects during -execution of a process. 
These files are usually created and populated with the container -specific information via the runtime. - -**Extra runtime files:** -* /etc/hosts -* /etc/resolv.conf -* /etc/hostname -* /etc/localtime - - -#### Defaults - -There are a few defaults that can be overridden by users, but in their omission -these apply to processes within a container. - -| Type | Value | -| ------------------- | ------------------------------ | -| Parent Death Signal | SIGKILL | -| UID | 0 | -| GID | 0 | -| GROUPS | 0, NULL | -| CWD | "/" | -| $HOME | Current user's home dir or "/" | -| Readonly rootfs | false | -| Pseudo TTY | false | - - -## Actions - -After a container is created there is a standard set of actions that can -be done to the container. These actions are part of the public API for -a container. - -| Action | Description | -| -------------- | ------------------------------------------------------------------ | -| Get processes | Return all the pids for processes running inside a container | -| Get Stats | Return resource statistics for the container as a whole | -| Wait | Wait waits on the container's init process ( pid 1 ) | -| Wait Process | Wait on any of the container's processes returning the exit status | -| Destroy | Kill the container's init process and remove any filesystem state | -| Signal | Send a signal to the container's init process | -| Signal Process | Send a signal to any of the container's processes | -| Pause | Pause all processes inside the container | -| Resume | Resume all processes inside the container if paused | -| Exec | Execute a new process inside of the container ( requires setns ) | -| Set | Setup configs of the container after it's created | - -### Execute a new process inside of a running container. - -User can execute a new process inside of a running container. Any binaries to be -executed must be accessible within the container's rootfs. - -The started process will run inside the container's rootfs. Any changes -made by the process to the container's filesystem will persist after the -process finished executing. - -The started process will join all the container's existing namespaces. When the -container is paused, the process will also be paused and will resume when -the container is unpaused. The started process will only run when the container's -primary process (PID 1) is running, and will not be restarted when the container -is restarted. - -#### Planned additions - -The started process will have its own cgroups nested inside the container's -cgroups. This is used for process tracking and optionally resource allocation -handling for the new process. Freezer cgroup is required, the rest of the cgroups -are optional. The process executor must place its pid inside the correct -cgroups before starting the process. This is done so that no child processes or -threads can escape the cgroups. - -When the process is stopped, the process executor will try (in a best-effort way) -to stop all its children and remove the sub-cgroups. 
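The exec flow described in the section above can be driven with the same `Process` structure shown in the README example. A minimal sketch, assuming `factory` is an existing `libcontainer.Factory` and `"container-id"` names a container that is already running; `factory.Load`, the binary path, and the `PATH` value here are illustrative assumptions rather than part of the spec:

```go
// Sketch: execute an additional process inside an already-running container.
// In this version of the API, calling Start on a container whose init is
// still alive joins the existing namespaces (setns) rather than creating a
// new init process.
container, err := factory.Load("container-id")
if err != nil {
	logrus.Fatal(err)
}

ps := &libcontainer.Process{
	Args:   []string{"/bin/ps", "aux"},
	Env:    []string{"PATH=/usr/bin:/bin"},
	Stdin:  os.Stdin,
	Stdout: os.Stdout,
	Stderr: os.Stderr,
}

// The process joins the container's namespaces and cgroups; any filesystem
// changes it makes persist after it exits.
if err := container.Start(ps); err != nil {
	logrus.Fatal(err)
}

// Wait returns once the exec'd process exits; the container keeps running.
if _, err := ps.Wait(); err != nil {
	logrus.Fatal(err)
}
```

As described above, if the container is paused while the exec'd process is running, that process is paused with it and resumes when the container is resumed.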
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go deleted file mode 100644 index 4eda56d1..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go +++ /dev/null @@ -1,69 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "os" - "strings" - - "github.com/syndtr/gocapability/capability" -) - -const allCapabilityTypes = capability.CAPS | capability.BOUNDS - -var capabilityMap map[string]capability.Cap - -func init() { - capabilityMap = make(map[string]capability.Cap) - last := capability.CAP_LAST_CAP - // workaround for RHEL6 which has no /proc/sys/kernel/cap_last_cap - if last == capability.Cap(63) { - last = capability.CAP_BLOCK_SUSPEND - } - for _, cap := range capability.List() { - if cap > last { - continue - } - capKey := fmt.Sprintf("CAP_%s", strings.ToUpper(cap.String())) - capabilityMap[capKey] = cap - } -} - -func newCapWhitelist(caps []string) (*whitelist, error) { - l := []capability.Cap{} - for _, c := range caps { - v, ok := capabilityMap[c] - if !ok { - return nil, fmt.Errorf("unknown capability %q", c) - } - l = append(l, v) - } - pid, err := capability.NewPid(os.Getpid()) - if err != nil { - return nil, err - } - return &whitelist{ - keep: l, - pid: pid, - }, nil -} - -type whitelist struct { - pid capability.Capabilities - keep []capability.Cap -} - -// dropBoundingSet drops the capability bounding set to those specified in the whitelist. -func (w *whitelist) dropBoundingSet() error { - w.pid.Clear(capability.BOUNDS) - w.pid.Set(capability.BOUNDS, w.keep...) - return w.pid.Apply(capability.BOUNDS) -} - -// drop drops all capabilities for the current process except those specified in the whitelist. -func (w *whitelist) drop() error { - w.pid.Clear(allCapabilityTypes) - w.pid.Set(allCapabilityTypes, w.keep...) - return w.pid.Apply(allCapabilityTypes) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go deleted file mode 100644 index c7bdf1f6..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build linux,!go1.5 - -package libcontainer - -import "syscall" - -// GidMappingsEnableSetgroups was added in Go 1.5, so do nothing when building -// with earlier versions -func enableSetgroups(sys *syscall.SysProcAttr) { -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console.go b/vendor/github.com/opencontainers/runc/libcontainer/console.go deleted file mode 100644 index 042a2a2e..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/console.go +++ /dev/null @@ -1,15 +0,0 @@ -package libcontainer - -import "io" - -// Console represents a pseudo TTY. -type Console interface { - io.ReadWriter - io.Closer - - // Path returns the filesystem path to the slave side of the pty. - Path() string - - // Fd returns the fd for the master of the pty. 
- Fd() uintptr -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go b/vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go deleted file mode 100644 index 3c89eda0..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build freebsd - -package libcontainer - -import ( - "errors" -) - -// NewConsole returns an initalized console that can be used within a container by copying bytes -// from the master side to the slave that is attached as the tty for the container's init process. -func NewConsole(uid, gid int) (Console, error) { - return nil, errors.New("libcontainer console is not supported on FreeBSD") -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/console_linux.go deleted file mode 100644 index 7af771b6..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/console_linux.go +++ /dev/null @@ -1,145 +0,0 @@ -package libcontainer - -import ( - "fmt" - "os" - "path/filepath" - "syscall" - "unsafe" - - "github.com/opencontainers/runc/libcontainer/label" -) - -// NewConsole returns an initalized console that can be used within a container by copying bytes -// from the master side to the slave that is attached as the tty for the container's init process. -func NewConsole(uid, gid int) (Console, error) { - master, err := os.OpenFile("/dev/ptmx", syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0) - if err != nil { - return nil, err - } - console, err := ptsname(master) - if err != nil { - return nil, err - } - if err := unlockpt(master); err != nil { - return nil, err - } - if err := os.Chmod(console, 0600); err != nil { - return nil, err - } - if err := os.Chown(console, uid, gid); err != nil { - return nil, err - } - return &linuxConsole{ - slavePath: console, - master: master, - }, nil -} - -// newConsoleFromPath is an internal function returning an initialized console for use inside -// a container's MNT namespace. -func newConsoleFromPath(slavePath string) *linuxConsole { - return &linuxConsole{ - slavePath: slavePath, - } -} - -// linuxConsole is a linux psuedo TTY for use within a container. -type linuxConsole struct { - master *os.File - slavePath string -} - -func (c *linuxConsole) Fd() uintptr { - return c.master.Fd() -} - -func (c *linuxConsole) Path() string { - return c.slavePath -} - -func (c *linuxConsole) Read(b []byte) (int, error) { - return c.master.Read(b) -} - -func (c *linuxConsole) Write(b []byte) (int, error) { - return c.master.Write(b) -} - -func (c *linuxConsole) Close() error { - if m := c.master; m != nil { - return m.Close() - } - return nil -} - -// mount initializes the console inside the rootfs mounting with the specified mount label -// and applying the correct ownership of the console. -func (c *linuxConsole) mount(rootfs, mountLabel string) error { - oldMask := syscall.Umask(0000) - defer syscall.Umask(oldMask) - if err := label.SetFileLabel(c.slavePath, mountLabel); err != nil { - return err - } - dest := filepath.Join(rootfs, "/dev/console") - f, err := os.Create(dest) - if err != nil && !os.IsExist(err) { - return err - } - if f != nil { - f.Close() - } - return syscall.Mount(c.slavePath, dest, "bind", syscall.MS_BIND, "") -} - -// dupStdio opens the slavePath for the console and dups the fds to the current -// processes stdio, fd 0,1,2. 
-func (c *linuxConsole) dupStdio() error { - slave, err := c.open(syscall.O_RDWR) - if err != nil { - return err - } - fd := int(slave.Fd()) - for _, i := range []int{0, 1, 2} { - if err := syscall.Dup3(fd, i, 0); err != nil { - return err - } - } - return nil -} - -// open is a clone of os.OpenFile without the O_CLOEXEC used to open the pty slave. -func (c *linuxConsole) open(flag int) (*os.File, error) { - r, e := syscall.Open(c.slavePath, flag, 0) - if e != nil { - return nil, &os.PathError{ - Op: "open", - Path: c.slavePath, - Err: e, - } - } - return os.NewFile(uintptr(r), c.slavePath), nil -} - -func ioctl(fd uintptr, flag, data uintptr) error { - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, flag, data); err != 0 { - return err - } - return nil -} - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { - var u int32 - return ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) -} - -// ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { - var n int32 - if err := ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil { - return "", err - } - return fmt.Sprintf("/dev/pts/%d", n), nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/console_windows.go deleted file mode 100644 index a68c02f6..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/console_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -package libcontainer - -// NewConsole returns an initalized console that can be used within a container -func NewConsole(uid, gid int) (Console, error) { - return &windowsConsole{}, nil -} - -// windowsConsole is a Windows psuedo TTY for use within a container. -type windowsConsole struct { -} - -func (c *windowsConsole) Fd() uintptr { - return 0 -} - -func (c *windowsConsole) Path() string { - return "" -} - -func (c *windowsConsole) Read(b []byte) (int, error) { - return 0, nil -} - -func (c *windowsConsole) Write(b []byte) (int, error) { - return 0, nil -} - -func (c *windowsConsole) Close() error { - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container.go b/vendor/github.com/opencontainers/runc/libcontainer/container.go deleted file mode 100644 index 32daa976..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/container.go +++ /dev/null @@ -1,144 +0,0 @@ -// Libcontainer provides a native Go implementation for creating containers -// with namespaces, cgroups, capabilities, and filesystem access controls. -// It allows you to manage the lifecycle of the container performing additional operations -// after the container is created. -package libcontainer - -import ( - "os" - "time" - - "github.com/opencontainers/runc/libcontainer/configs" -) - -// The status of a container. -type Status int - -const ( - // The container exists but has not been run yet - Created Status = iota - - // The container exists and is running. - Running - - // The container exists, it is in the process of being paused. - Pausing - - // The container exists, but all its processes are paused. - Paused - - // The container does not exist. 
- Destroyed -) - -func (s Status) String() string { - switch s { - case Created: - return "created" - case Running: - return "running" - case Pausing: - return "pausing" - case Paused: - return "paused" - case Destroyed: - return "destroyed" - default: - return "unknown" - } -} - -// BaseState represents the platform agnostic pieces relating to a -// running container's state -type BaseState struct { - // ID is the container ID. - ID string `json:"id"` - - // InitProcessPid is the init process id in the parent namespace. - InitProcessPid int `json:"init_process_pid"` - - // InitProcessStartTime is the init process start time in clock cycles since boot time. - InitProcessStartTime string `json:"init_process_start"` - - // Created is the unix timestamp for the creation time of the container in UTC - Created time.Time `json:"created"` - - // Config is the container's configuration. - Config configs.Config `json:"config"` -} - -// A libcontainer container object. -// -// Each container is thread-safe within the same process. Since a container can -// be destroyed by a separate process, any function may return that the container -// was not found. BaseContainer includes methods that are platform agnostic. -type BaseContainer interface { - // Returns the ID of the container - ID() string - - // Returns the current status of the container. - // - // errors: - // ContainerDestroyed - Container no longer exists, - // Systemerror - System error. - Status() (Status, error) - - // State returns the current container's state information. - // - // errors: - // Systemerror - System error. - State() (*State, error) - - // Returns the current config of the container. - Config() configs.Config - - // Returns the PIDs inside this container. The PIDs are in the namespace of the calling process. - // - // errors: - // ContainerDestroyed - Container no longer exists, - // Systemerror - System error. - // - // Some of the returned PIDs may no longer refer to processes in the Container, unless - // the Container state is PAUSED in which case every PID in the slice is valid. - Processes() ([]int, error) - - // Returns statistics for the container. - // - // errors: - // ContainerDestroyed - Container no longer exists, - // Systemerror - System error. - Stats() (*Stats, error) - - // Set resources of container as configured - // - // We can use this to change resources when containers are running. - // - // errors: - // Systemerror - System error. - Set(config configs.Config) error - - // Start a process inside the container. Returns error if process fails to - // start. You can track process lifecycle with passed Process structure. - // - // errors: - // ContainerDestroyed - Container no longer exists, - // ConfigInvalid - config is invalid, - // ContainerPaused - Container is paused, - // Systemerror - System error. - Start(process *Process) (err error) - - // Destroys the container after killing all running processes. - // - // Any event registrations are removed before the container is destroyed. - // No error is returned if the container is already destroyed. - // - // errors: - // Systemerror - System error. - Destroy() error - - // Signal sends the provided signal code to the container's initial process. - // - // errors: - // Systemerror - System error. 
- Signal(s os.Signal) error -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go deleted file mode 100644 index 2ae50c46..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go +++ /dev/null @@ -1,1228 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "reflect" - "strings" - "sync" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - "github.com/golang/protobuf/proto" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/criurpc" - "github.com/opencontainers/runc/libcontainer/utils" - "github.com/syndtr/gocapability/capability" - "github.com/vishvananda/netlink/nl" -) - -const stdioFdCount = 3 - -type linuxContainer struct { - id string - root string - config *configs.Config - cgroupManager cgroups.Manager - initPath string - initArgs []string - initProcess parentProcess - criuPath string - m sync.Mutex - criuVersion int - state containerState - created time.Time -} - -// State represents a running container's state -type State struct { - BaseState - - // Platform specific fields below here - - // Path to all the cgroups setup for a container. Key is cgroup subsystem name - // with the value as the path. - CgroupPaths map[string]string `json:"cgroup_paths"` - - // NamespacePaths are filepaths to the container's namespaces. Key is the namespace type - // with the value as the path. - NamespacePaths map[configs.NamespaceType]string `json:"namespace_paths"` - - // Container's standard descriptors (std{in,out,err}), needed for checkpoint and restore - ExternalDescriptors []string `json:"external_descriptors,omitempty"` -} - -// A libcontainer container object. -// -// Each container is thread-safe within the same process. Since a container can -// be destroyed by a separate process, any function may return that the container -// was not found. -type Container interface { - BaseContainer - - // Methods below here are platform specific - - // Checkpoint checkpoints the running container's state to disk using the criu(8) utility. - // - // errors: - // Systemerror - System error. - Checkpoint(criuOpts *CriuOpts) error - - // Restore restores the checkpointed container to a running state using the criu(8) utility. - // - // errors: - // Systemerror - System error. - Restore(process *Process, criuOpts *CriuOpts) error - - // If the Container state is RUNNING or PAUSING, sets the Container state to PAUSING and pauses - // the execution of any user processes. Asynchronously, when the container finished being paused the - // state is changed to PAUSED. - // If the Container state is PAUSED, do nothing. - // - // errors: - // ContainerDestroyed - Container no longer exists, - // Systemerror - System error. - Pause() error - - // If the Container state is PAUSED, resumes the execution of any user processes in the - // Container before setting the Container state to RUNNING. - // If the Container state is RUNNING, do nothing. - // - // errors: - // ContainerDestroyed - Container no longer exists, - // Systemerror - System error. - Resume() error - - // NotifyOOM returns a read-only channel signaling when the container receives an OOM notification. - // - // errors: - // Systemerror - System error. 
- NotifyOOM() (<-chan struct{}, error) - - // NotifyMemoryPressure returns a read-only channel signaling when the container reaches a given pressure level - // - // errors: - // Systemerror - System error. - NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error) -} - -// ID returns the container's unique ID -func (c *linuxContainer) ID() string { - return c.id -} - -// Config returns the container's configuration -func (c *linuxContainer) Config() configs.Config { - return *c.config -} - -func (c *linuxContainer) Status() (Status, error) { - c.m.Lock() - defer c.m.Unlock() - return c.currentStatus() -} - -func (c *linuxContainer) State() (*State, error) { - c.m.Lock() - defer c.m.Unlock() - return c.currentState() -} - -func (c *linuxContainer) Processes() ([]int, error) { - pids, err := c.cgroupManager.GetAllPids() - if err != nil { - return nil, newSystemError(err) - } - return pids, nil -} - -func (c *linuxContainer) Stats() (*Stats, error) { - var ( - err error - stats = &Stats{} - ) - if stats.CgroupStats, err = c.cgroupManager.GetStats(); err != nil { - return stats, newSystemError(err) - } - for _, iface := range c.config.Networks { - switch iface.Type { - case "veth": - istats, err := getNetworkInterfaceStats(iface.HostInterfaceName) - if err != nil { - return stats, newSystemError(err) - } - stats.Interfaces = append(stats.Interfaces, istats) - } - } - return stats, nil -} - -func (c *linuxContainer) Set(config configs.Config) error { - c.m.Lock() - defer c.m.Unlock() - c.config = &config - return c.cgroupManager.Set(c.config) -} - -func (c *linuxContainer) Start(process *Process) error { - c.m.Lock() - defer c.m.Unlock() - status, err := c.currentStatus() - if err != nil { - return err - } - doInit := status == Destroyed - parent, err := c.newParentProcess(process, doInit) - if err != nil { - return newSystemError(err) - } - if err := parent.start(); err != nil { - // terminate the process to ensure that it properly is reaped. 
- if err := parent.terminate(); err != nil { - logrus.Warn(err) - } - return newSystemError(err) - } - // generate a timestamp indicating when the container was started - c.created = time.Now().UTC() - - c.state = &runningState{ - c: c, - } - if doInit { - if err := c.updateState(parent); err != nil { - return err - } - if c.config.Hooks != nil { - s := configs.HookState{ - Version: c.config.Version, - ID: c.id, - Pid: parent.pid(), - Root: c.config.Rootfs, - BundlePath: utils.SearchLabels(c.config.Labels, "bundle"), - } - for _, hook := range c.config.Hooks.Poststart { - if err := hook.Run(s); err != nil { - if err := parent.terminate(); err != nil { - logrus.Warn(err) - } - return newSystemError(err) - } - } - } - } - return nil -} - -func (c *linuxContainer) Signal(s os.Signal) error { - if err := c.initProcess.signal(s); err != nil { - return newSystemError(err) - } - return nil -} - -func (c *linuxContainer) newParentProcess(p *Process, doInit bool) (parentProcess, error) { - parentPipe, childPipe, err := newPipe() - if err != nil { - return nil, newSystemError(err) - } - cmd, err := c.commandTemplate(p, childPipe) - if err != nil { - return nil, newSystemError(err) - } - if !doInit { - return c.newSetnsProcess(p, cmd, parentPipe, childPipe) - } - return c.newInitProcess(p, cmd, parentPipe, childPipe) -} - -func (c *linuxContainer) commandTemplate(p *Process, childPipe *os.File) (*exec.Cmd, error) { - cmd := &exec.Cmd{ - Path: c.initPath, - Args: c.initArgs, - } - cmd.Stdin = p.Stdin - cmd.Stdout = p.Stdout - cmd.Stderr = p.Stderr - cmd.Dir = c.config.Rootfs - if cmd.SysProcAttr == nil { - cmd.SysProcAttr = &syscall.SysProcAttr{} - } - cmd.ExtraFiles = append(p.ExtraFiles, childPipe) - cmd.Env = append(cmd.Env, fmt.Sprintf("_LIBCONTAINER_INITPIPE=%d", stdioFdCount+len(cmd.ExtraFiles)-1)) - // NOTE: when running a container with no PID namespace and the parent process spawning the container is - // PID1 the pdeathsig is being delivered to the container's init process by the kernel for some reason - // even with the parent still running. 
- if c.config.ParentDeathSignal > 0 { - cmd.SysProcAttr.Pdeathsig = syscall.Signal(c.config.ParentDeathSignal) - } - return cmd, nil -} - -func (c *linuxContainer) newInitProcess(p *Process, cmd *exec.Cmd, parentPipe, childPipe *os.File) (*initProcess, error) { - cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initStandard)) - nsMaps := make(map[configs.NamespaceType]string) - for _, ns := range c.config.Namespaces { - if ns.Path != "" { - nsMaps[ns.Type] = ns.Path - } - } - _, sharePidns := nsMaps[configs.NEWPID] - data, err := c.bootstrapData(c.config.Namespaces.CloneFlags(), nsMaps, "") - if err != nil { - return nil, err - } - return &initProcess{ - cmd: cmd, - childPipe: childPipe, - parentPipe: parentPipe, - manager: c.cgroupManager, - config: c.newInitConfig(p), - container: c, - process: p, - bootstrapData: data, - sharePidns: sharePidns, - }, nil -} - -func (c *linuxContainer) newSetnsProcess(p *Process, cmd *exec.Cmd, parentPipe, childPipe *os.File) (*setnsProcess, error) { - cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initSetns)) - state, err := c.currentState() - if err != nil { - return nil, newSystemError(err) - } - // for setns process, we dont have to set cloneflags as the process namespaces - // will only be set via setns syscall - data, err := c.bootstrapData(0, state.NamespacePaths, p.consolePath) - if err != nil { - return nil, err - } - // TODO: set on container for process management - return &setnsProcess{ - cmd: cmd, - cgroupPaths: c.cgroupManager.GetPaths(), - childPipe: childPipe, - parentPipe: parentPipe, - config: c.newInitConfig(p), - process: p, - bootstrapData: data, - }, nil -} - -func (c *linuxContainer) newInitConfig(process *Process) *initConfig { - cfg := &initConfig{ - Config: c.config, - Args: process.Args, - Env: process.Env, - User: process.User, - Cwd: process.Cwd, - Console: process.consolePath, - Capabilities: process.Capabilities, - PassedFilesCount: len(process.ExtraFiles), - ContainerId: c.ID(), - NoNewPrivileges: c.config.NoNewPrivileges, - AppArmorProfile: c.config.AppArmorProfile, - ProcessLabel: c.config.ProcessLabel, - Rlimits: c.config.Rlimits, - } - if process.NoNewPrivileges != nil { - cfg.NoNewPrivileges = *process.NoNewPrivileges - } - if process.AppArmorProfile != "" { - cfg.AppArmorProfile = process.AppArmorProfile - } - if process.Label != "" { - cfg.ProcessLabel = process.Label - } - if len(process.Rlimits) > 0 { - cfg.Rlimits = process.Rlimits - } - return cfg -} - -func newPipe() (parent *os.File, child *os.File, err error) { - fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0) - if err != nil { - return nil, nil, err - } - return os.NewFile(uintptr(fds[1]), "parent"), os.NewFile(uintptr(fds[0]), "child"), nil -} - -func (c *linuxContainer) Destroy() error { - c.m.Lock() - defer c.m.Unlock() - return c.state.destroy() -} - -func (c *linuxContainer) Pause() error { - c.m.Lock() - defer c.m.Unlock() - status, err := c.currentStatus() - if err != nil { - return err - } - if status != Running { - return newGenericError(fmt.Errorf("container not running"), ContainerNotRunning) - } - if err := c.cgroupManager.Freeze(configs.Frozen); err != nil { - return err - } - return c.state.transition(&pausedState{ - c: c, - }) -} - -func (c *linuxContainer) Resume() error { - c.m.Lock() - defer c.m.Unlock() - status, err := c.currentStatus() - if err != nil { - return err - } - if status != Paused { - return newGenericError(fmt.Errorf("container not paused"), 
ContainerNotPaused) - } - if err := c.cgroupManager.Freeze(configs.Thawed); err != nil { - return err - } - return c.state.transition(&runningState{ - c: c, - }) -} - -func (c *linuxContainer) NotifyOOM() (<-chan struct{}, error) { - return notifyOnOOM(c.cgroupManager.GetPaths()) -} - -func (c *linuxContainer) NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error) { - return notifyMemoryPressure(c.cgroupManager.GetPaths(), level) -} - -// check Criu version greater than or equal to min_version -func (c *linuxContainer) checkCriuVersion(min_version string) error { - var x, y, z, versionReq int - - _, err := fmt.Sscanf(min_version, "%d.%d.%d\n", &x, &y, &z) // 1.5.2 - if err != nil { - _, err = fmt.Sscanf(min_version, "Version: %d.%d\n", &x, &y) // 1.6 - } - versionReq = x*10000 + y*100 + z - - out, err := exec.Command(c.criuPath, "-V").Output() - if err != nil { - return fmt.Errorf("Unable to execute CRIU command: %s", c.criuPath) - } - - x = 0 - y = 0 - z = 0 - if ep := strings.Index(string(out), "-"); ep >= 0 { - // criu Git version format - var version string - if sp := strings.Index(string(out), "GitID"); sp > 0 { - version = string(out)[sp:ep] - } else { - return fmt.Errorf("Unable to parse the CRIU version: %s", c.criuPath) - } - - n, err := fmt.Sscanf(string(version), "GitID: v%d.%d.%d", &x, &y, &z) // 1.5.2 - if err != nil { - n, err = fmt.Sscanf(string(version), "GitID: v%d.%d", &x, &y) // 1.6 - y++ - } else { - z++ - } - if n < 2 || err != nil { - return fmt.Errorf("Unable to parse the CRIU version: %s %d %s", version, n, err) - } - } else { - // criu release version format - n, err := fmt.Sscanf(string(out), "Version: %d.%d.%d\n", &x, &y, &z) // 1.5.2 - if err != nil { - n, err = fmt.Sscanf(string(out), "Version: %d.%d\n", &x, &y) // 1.6 - } - if n < 2 || err != nil { - return fmt.Errorf("Unable to parse the CRIU version: %s %d %s", out, n, err) - } - } - - c.criuVersion = x*10000 + y*100 + z - - if c.criuVersion < versionReq { - return fmt.Errorf("CRIU version must be %s or higher", min_version) - } - - return nil -} - -const descriptorsFilename = "descriptors.json" - -func (c *linuxContainer) addCriuDumpMount(req *criurpc.CriuReq, m *configs.Mount) { - mountDest := m.Destination - if strings.HasPrefix(mountDest, c.config.Rootfs) { - mountDest = mountDest[len(c.config.Rootfs):] - } - - extMnt := &criurpc.ExtMountMap{ - Key: proto.String(mountDest), - Val: proto.String(mountDest), - } - req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt) -} - -func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error { - c.m.Lock() - defer c.m.Unlock() - - if err := c.checkCriuVersion("1.5.2"); err != nil { - return err - } - - if criuOpts.ImagesDirectory == "" { - return fmt.Errorf("invalid directory to save checkpoint") - } - - // Since a container can be C/R'ed multiple times, - // the checkpoint directory may already exist. 
- if err := os.Mkdir(criuOpts.ImagesDirectory, 0755); err != nil && !os.IsExist(err) { - return err - } - - if criuOpts.WorkDirectory == "" { - criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work") - } - - if err := os.Mkdir(criuOpts.WorkDirectory, 0755); err != nil && !os.IsExist(err) { - return err - } - - workDir, err := os.Open(criuOpts.WorkDirectory) - if err != nil { - return err - } - defer workDir.Close() - - imageDir, err := os.Open(criuOpts.ImagesDirectory) - if err != nil { - return err - } - defer imageDir.Close() - - rpcOpts := criurpc.CriuOpts{ - ImagesDirFd: proto.Int32(int32(imageDir.Fd())), - WorkDirFd: proto.Int32(int32(workDir.Fd())), - LogLevel: proto.Int32(4), - LogFile: proto.String("dump.log"), - Root: proto.String(c.config.Rootfs), - ManageCgroups: proto.Bool(true), - NotifyScripts: proto.Bool(true), - Pid: proto.Int32(int32(c.initProcess.pid())), - ShellJob: proto.Bool(criuOpts.ShellJob), - LeaveRunning: proto.Bool(criuOpts.LeaveRunning), - TcpEstablished: proto.Bool(criuOpts.TcpEstablished), - ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections), - FileLocks: proto.Bool(criuOpts.FileLocks), - EmptyNs: proto.Uint32(criuOpts.EmptyNs), - } - - // append optional criu opts, e.g., page-server and port - if criuOpts.PageServer.Address != "" && criuOpts.PageServer.Port != 0 { - rpcOpts.Ps = &criurpc.CriuPageServerInfo{ - Address: proto.String(criuOpts.PageServer.Address), - Port: proto.Int32(criuOpts.PageServer.Port), - } - } - - // append optional manage cgroups mode - if criuOpts.ManageCgroupsMode != 0 { - if err := c.checkCriuVersion("1.7"); err != nil { - return err - } - mode := criurpc.CriuCgMode(criuOpts.ManageCgroupsMode) - rpcOpts.ManageCgroupsMode = &mode - } - - t := criurpc.CriuReqType_DUMP - req := &criurpc.CriuReq{ - Type: &t, - Opts: &rpcOpts, - } - - for _, m := range c.config.Mounts { - switch m.Device { - case "bind": - c.addCriuDumpMount(req, m) - break - case "cgroup": - binds, err := getCgroupMounts(m) - if err != nil { - return err - } - for _, b := range binds { - c.addCriuDumpMount(req, b) - } - break - } - } - - // Write the FD info to a file in the image directory - - fdsJSON, err := json.Marshal(c.initProcess.externalDescriptors()) - if err != nil { - return err - } - - err = ioutil.WriteFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename), fdsJSON, 0655) - if err != nil { - return err - } - - err = c.criuSwrk(nil, req, criuOpts, false) - if err != nil { - return err - } - return nil -} - -func (c *linuxContainer) addCriuRestoreMount(req *criurpc.CriuReq, m *configs.Mount) { - mountDest := m.Destination - if strings.HasPrefix(mountDest, c.config.Rootfs) { - mountDest = mountDest[len(c.config.Rootfs):] - } - - extMnt := &criurpc.ExtMountMap{ - Key: proto.String(mountDest), - Val: proto.String(m.Source), - } - req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt) -} - -func (c *linuxContainer) Restore(process *Process, criuOpts *CriuOpts) error { - c.m.Lock() - defer c.m.Unlock() - if err := c.checkCriuVersion("1.5.2"); err != nil { - return err - } - if criuOpts.WorkDirectory == "" { - criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work") - } - // Since a container can be C/R'ed multiple times, - // the work directory may already exist. 
- if err := os.Mkdir(criuOpts.WorkDirectory, 0655); err != nil && !os.IsExist(err) { - return err - } - workDir, err := os.Open(criuOpts.WorkDirectory) - if err != nil { - return err - } - defer workDir.Close() - if criuOpts.ImagesDirectory == "" { - return fmt.Errorf("invalid directory to restore checkpoint") - } - imageDir, err := os.Open(criuOpts.ImagesDirectory) - if err != nil { - return err - } - defer imageDir.Close() - // CRIU has a few requirements for a root directory: - // * it must be a mount point - // * its parent must not be overmounted - // c.config.Rootfs is bind-mounted to a temporary directory - // to satisfy these requirements. - root := filepath.Join(c.root, "criu-root") - if err := os.Mkdir(root, 0755); err != nil { - return err - } - defer os.Remove(root) - root, err = filepath.EvalSymlinks(root) - if err != nil { - return err - } - err = syscall.Mount(c.config.Rootfs, root, "", syscall.MS_BIND|syscall.MS_REC, "") - if err != nil { - return err - } - defer syscall.Unmount(root, syscall.MNT_DETACH) - t := criurpc.CriuReqType_RESTORE - req := &criurpc.CriuReq{ - Type: &t, - Opts: &criurpc.CriuOpts{ - ImagesDirFd: proto.Int32(int32(imageDir.Fd())), - WorkDirFd: proto.Int32(int32(workDir.Fd())), - EvasiveDevices: proto.Bool(true), - LogLevel: proto.Int32(4), - LogFile: proto.String("restore.log"), - RstSibling: proto.Bool(true), - Root: proto.String(root), - ManageCgroups: proto.Bool(true), - NotifyScripts: proto.Bool(true), - ShellJob: proto.Bool(criuOpts.ShellJob), - ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections), - TcpEstablished: proto.Bool(criuOpts.TcpEstablished), - FileLocks: proto.Bool(criuOpts.FileLocks), - EmptyNs: proto.Uint32(criuOpts.EmptyNs), - }, - } - - for _, m := range c.config.Mounts { - switch m.Device { - case "bind": - c.addCriuRestoreMount(req, m) - break - case "cgroup": - binds, err := getCgroupMounts(m) - if err != nil { - return err - } - for _, b := range binds { - c.addCriuRestoreMount(req, b) - } - break - } - } - for _, iface := range c.config.Networks { - switch iface.Type { - case "veth": - veth := new(criurpc.CriuVethPair) - veth.IfOut = proto.String(iface.HostInterfaceName) - veth.IfIn = proto.String(iface.Name) - req.Opts.Veths = append(req.Opts.Veths, veth) - break - case "loopback": - break - } - } - for _, i := range criuOpts.VethPairs { - veth := new(criurpc.CriuVethPair) - veth.IfOut = proto.String(i.HostInterfaceName) - veth.IfIn = proto.String(i.ContainerInterfaceName) - req.Opts.Veths = append(req.Opts.Veths, veth) - } - - // append optional manage cgroups mode - if criuOpts.ManageCgroupsMode != 0 { - if err := c.checkCriuVersion("1.7"); err != nil { - return err - } - mode := criurpc.CriuCgMode(criuOpts.ManageCgroupsMode) - req.Opts.ManageCgroupsMode = &mode - } - - var ( - fds []string - fdJSON []byte - ) - if fdJSON, err = ioutil.ReadFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename)); err != nil { - return err - } - - if err := json.Unmarshal(fdJSON, &fds); err != nil { - return err - } - for i := range fds { - if s := fds[i]; strings.Contains(s, "pipe:") { - inheritFd := new(criurpc.InheritFd) - inheritFd.Key = proto.String(s) - inheritFd.Fd = proto.Int32(int32(i)) - req.Opts.InheritFd = append(req.Opts.InheritFd, inheritFd) - } - } - return c.criuSwrk(process, req, criuOpts, true) -} - -func (c *linuxContainer) criuApplyCgroups(pid int, req *criurpc.CriuReq) error { - if err := c.cgroupManager.Apply(pid); err != nil { - return err - } - - path := fmt.Sprintf("/proc/%d/cgroup", pid) - cgroupsPaths, 
err := cgroups.ParseCgroupFile(path) - if err != nil { - return err - } - - for c, p := range cgroupsPaths { - cgroupRoot := &criurpc.CgroupRoot{ - Ctrl: proto.String(c), - Path: proto.String(p), - } - req.Opts.CgRoot = append(req.Opts.CgRoot, cgroupRoot) - } - - return nil -} - -func (c *linuxContainer) criuSwrk(process *Process, req *criurpc.CriuReq, opts *CriuOpts, applyCgroups bool) error { - fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_SEQPACKET|syscall.SOCK_CLOEXEC, 0) - if err != nil { - return err - } - - logPath := filepath.Join(opts.WorkDirectory, req.GetOpts().GetLogFile()) - criuClient := os.NewFile(uintptr(fds[0]), "criu-transport-client") - criuServer := os.NewFile(uintptr(fds[1]), "criu-transport-server") - defer criuClient.Close() - defer criuServer.Close() - - args := []string{"swrk", "3"} - logrus.Debugf("Using CRIU %d at: %s", c.criuVersion, c.criuPath) - logrus.Debugf("Using CRIU with following args: %s", args) - cmd := exec.Command(c.criuPath, args...) - if process != nil { - cmd.Stdin = process.Stdin - cmd.Stdout = process.Stdout - cmd.Stderr = process.Stderr - } - cmd.ExtraFiles = append(cmd.ExtraFiles, criuServer) - - if err := cmd.Start(); err != nil { - return err - } - criuServer.Close() - - defer func() { - criuClient.Close() - _, err := cmd.Process.Wait() - if err != nil { - return - } - }() - - if applyCgroups { - err := c.criuApplyCgroups(cmd.Process.Pid, req) - if err != nil { - return err - } - } - - var extFds []string - if process != nil { - extFds, err = getPipeFds(cmd.Process.Pid) - if err != nil { - return err - } - } - - logrus.Debugf("Using CRIU in %s mode", req.GetType().String()) - val := reflect.ValueOf(req.GetOpts()) - v := reflect.Indirect(val) - for i := 0; i < v.NumField(); i++ { - st := v.Type() - name := st.Field(i).Name - if strings.HasPrefix(name, "XXX_") { - continue - } - value := val.MethodByName("Get" + name).Call([]reflect.Value{}) - logrus.Debugf("CRIU option %s with value %v", name, value[0]) - } - data, err := proto.Marshal(req) - if err != nil { - return err - } - _, err = criuClient.Write(data) - if err != nil { - return err - } - - buf := make([]byte, 10*4096) - for true { - n, err := criuClient.Read(buf) - if err != nil { - return err - } - if n == 0 { - return fmt.Errorf("unexpected EOF") - } - if n == len(buf) { - return fmt.Errorf("buffer is too small") - } - - resp := new(criurpc.CriuResp) - err = proto.Unmarshal(buf[:n], resp) - if err != nil { - return err - } - if !resp.GetSuccess() { - typeString := req.GetType().String() - return fmt.Errorf("criu failed: type %s errno %d\nlog file: %s", typeString, resp.GetCrErrno(), logPath) - } - - t := resp.GetType() - switch { - case t == criurpc.CriuReqType_NOTIFY: - if err := c.criuNotifications(resp, process, opts, extFds); err != nil { - return err - } - t = criurpc.CriuReqType_NOTIFY - req = &criurpc.CriuReq{ - Type: &t, - NotifySuccess: proto.Bool(true), - } - data, err = proto.Marshal(req) - if err != nil { - return err - } - _, err = criuClient.Write(data) - if err != nil { - return err - } - continue - case t == criurpc.CriuReqType_RESTORE: - case t == criurpc.CriuReqType_DUMP: - break - default: - return fmt.Errorf("unable to parse the response %s", resp.String()) - } - - break - } - - // cmd.Wait() waits cmd.goroutines which are used for proxying file descriptors. - // Here we want to wait only the CRIU process. 
- st, err := cmd.Process.Wait() - if err != nil { - return err - } - if !st.Success() { - return fmt.Errorf("criu failed: %s\nlog file: %s", st.String(), logPath) - } - return nil -} - -// block any external network activity -func lockNetwork(config *configs.Config) error { - for _, config := range config.Networks { - strategy, err := getStrategy(config.Type) - if err != nil { - return err - } - - if err := strategy.detach(config); err != nil { - return err - } - } - return nil -} - -func unlockNetwork(config *configs.Config) error { - for _, config := range config.Networks { - strategy, err := getStrategy(config.Type) - if err != nil { - return err - } - if err = strategy.attach(config); err != nil { - return err - } - } - return nil -} - -func (c *linuxContainer) criuNotifications(resp *criurpc.CriuResp, process *Process, opts *CriuOpts, fds []string) error { - notify := resp.GetNotify() - if notify == nil { - return fmt.Errorf("invalid response: %s", resp.String()) - } - switch { - case notify.GetScript() == "post-dump": - f, err := os.Create(filepath.Join(c.root, "checkpoint")) - if err != nil { - return err - } - f.Close() - case notify.GetScript() == "network-unlock": - if err := unlockNetwork(c.config); err != nil { - return err - } - case notify.GetScript() == "network-lock": - if err := lockNetwork(c.config); err != nil { - return err - } - case notify.GetScript() == "setup-namespaces": - if c.config.Hooks != nil { - s := configs.HookState{ - Version: c.config.Version, - ID: c.id, - Pid: int(notify.GetPid()), - Root: c.config.Rootfs, - } - for _, hook := range c.config.Hooks.Prestart { - if err := hook.Run(s); err != nil { - return newSystemError(err) - } - } - } - case notify.GetScript() == "post-restore": - pid := notify.GetPid() - r, err := newRestoredProcess(int(pid), fds) - if err != nil { - return err - } - process.ops = r - if err := c.state.transition(&restoredState{ - imageDir: opts.ImagesDirectory, - c: c, - }); err != nil { - return err - } - if err := c.updateState(r); err != nil { - return err - } - if err := os.Remove(filepath.Join(c.root, "checkpoint")); err != nil { - if !os.IsNotExist(err) { - logrus.Error(err) - } - } - } - return nil -} - -func (c *linuxContainer) updateState(process parentProcess) error { - c.initProcess = process - state, err := c.currentState() - if err != nil { - return err - } - return c.saveState(state) -} - -func (c *linuxContainer) saveState(s *State) error { - f, err := os.Create(filepath.Join(c.root, stateFilename)) - if err != nil { - return err - } - defer f.Close() - return utils.WriteJSON(f, s) -} - -func (c *linuxContainer) deleteState() error { - return os.Remove(filepath.Join(c.root, stateFilename)) -} - -func (c *linuxContainer) currentStatus() (Status, error) { - if err := c.refreshState(); err != nil { - return -1, err - } - return c.state.status(), nil -} - -// refreshState needs to be called to verify that the current state on the -// container is what is true. Because consumers of libcontainer can use it -// out of process we need to verify the container's status based on runtime -// information and not rely on our in process info. 
-func (c *linuxContainer) refreshState() error { - paused, err := c.isPaused() - if err != nil { - return err - } - if paused { - return c.state.transition(&pausedState{c: c}) - } - running, err := c.isRunning() - if err != nil { - return err - } - if running { - return c.state.transition(&runningState{c: c}) - } - return c.state.transition(&stoppedState{c: c}) -} - -func (c *linuxContainer) isRunning() (bool, error) { - if c.initProcess == nil { - return false, nil - } - // return Running if the init process is alive - if err := syscall.Kill(c.initProcess.pid(), 0); err != nil { - if err == syscall.ESRCH { - return false, nil - } - return false, newSystemError(err) - } - return true, nil -} - -func (c *linuxContainer) isPaused() (bool, error) { - data, err := ioutil.ReadFile(filepath.Join(c.cgroupManager.GetPaths()["freezer"], "freezer.state")) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, newSystemError(err) - } - return bytes.Equal(bytes.TrimSpace(data), []byte("FROZEN")), nil -} - -func (c *linuxContainer) currentState() (*State, error) { - var ( - startTime string - externalDescriptors []string - pid = -1 - ) - if c.initProcess != nil { - pid = c.initProcess.pid() - startTime, _ = c.initProcess.startTime() - externalDescriptors = c.initProcess.externalDescriptors() - } - state := &State{ - BaseState: BaseState{ - ID: c.ID(), - Config: *c.config, - InitProcessPid: pid, - InitProcessStartTime: startTime, - Created: c.created, - }, - CgroupPaths: c.cgroupManager.GetPaths(), - NamespacePaths: make(map[configs.NamespaceType]string), - ExternalDescriptors: externalDescriptors, - } - if pid > 0 { - for _, ns := range c.config.Namespaces { - state.NamespacePaths[ns.Type] = ns.GetPath(pid) - } - for _, nsType := range configs.NamespaceTypes() { - if !configs.IsNamespaceSupported(nsType) { - continue - } - if _, ok := state.NamespacePaths[nsType]; !ok { - ns := configs.Namespace{Type: nsType} - state.NamespacePaths[ns.Type] = ns.GetPath(pid) - } - } - } - return state, nil -} - -// orderNamespacePaths sorts namespace paths into a list of paths that we -// can setns in order. 
-func (c *linuxContainer) orderNamespacePaths(namespaces map[configs.NamespaceType]string) ([]string, error) { - paths := []string{} - nsTypes := []configs.NamespaceType{ - configs.NEWIPC, - configs.NEWUTS, - configs.NEWNET, - configs.NEWPID, - configs.NEWNS, - } - // join userns if the init process explicitly requires NEWUSER - if c.config.Namespaces.Contains(configs.NEWUSER) { - nsTypes = append(nsTypes, configs.NEWUSER) - } - for _, nsType := range nsTypes { - if p, ok := namespaces[nsType]; ok && p != "" { - // check if the requested namespace is supported - if !configs.IsNamespaceSupported(nsType) { - return nil, newSystemError(fmt.Errorf("namespace %s is not supported", nsType)) - } - // only set to join this namespace if it exists - if _, err := os.Lstat(p); err != nil { - return nil, newSystemError(err) - } - // do not allow namespace path with comma as we use it to separate - // the namespace paths - if strings.ContainsRune(p, ',') { - return nil, newSystemError(fmt.Errorf("invalid path %s", p)) - } - paths = append(paths, p) - } - } - return paths, nil -} - -func encodeIDMapping(idMap []configs.IDMap) ([]byte, error) { - data := bytes.NewBuffer(nil) - for _, im := range idMap { - line := fmt.Sprintf("%d %d %d\n", im.ContainerID, im.HostID, im.Size) - if _, err := data.WriteString(line); err != nil { - return nil, err - } - } - return data.Bytes(), nil -} - -// bootstrapData encodes the necessary data in netlink binary format -// as a io.Reader. -// Consumer can write the data to a bootstrap program -// such as one that uses nsenter package to bootstrap the container's -// init process correctly, i.e. with correct namespaces, uid/gid -// mapping etc. -func (c *linuxContainer) bootstrapData(cloneFlags uintptr, nsMaps map[configs.NamespaceType]string, consolePath string) (io.Reader, error) { - // create the netlink message - r := nl.NewNetlinkRequest(int(InitMsg), 0) - - // write cloneFlags - r.AddData(&Int32msg{ - Type: CloneFlagsAttr, - Value: uint32(cloneFlags), - }) - - // write console path - if consolePath != "" { - r.AddData(&Bytemsg{ - Type: ConsolePathAttr, - Value: []byte(consolePath), - }) - } - - // write custom namespace paths - if len(nsMaps) > 0 { - nsPaths, err := c.orderNamespacePaths(nsMaps) - if err != nil { - return nil, err - } - r.AddData(&Bytemsg{ - Type: NsPathsAttr, - Value: []byte(strings.Join(nsPaths, ",")), - }) - } - - // write namespace paths only when we are not joining an existing user ns - _, joinExistingUser := nsMaps[configs.NEWUSER] - if !joinExistingUser { - // write uid mappings - if len(c.config.UidMappings) > 0 { - b, err := encodeIDMapping(c.config.UidMappings) - if err != nil { - return nil, err - } - r.AddData(&Bytemsg{ - Type: UidmapAttr, - Value: b, - }) - } - - // write gid mappings - if len(c.config.GidMappings) > 0 { - b, err := encodeIDMapping(c.config.GidMappings) - if err != nil { - return nil, err - } - r.AddData(&Bytemsg{ - Type: GidmapAttr, - Value: b, - }) - // check if we have CAP_SETGID to setgroup properly - pid, err := capability.NewPid(os.Getpid()) - if err != nil { - return nil, err - } - if !pid.Get(capability.EFFECTIVE, capability.CAP_SETGID) { - r.AddData(&Boolmsg{ - Type: SetgroupAttr, - Value: true, - }) - } - } - } - - return bytes.NewReader(r.Serialize()), nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container_linux_test.go b/vendor/github.com/opencontainers/runc/libcontainer/container_linux_test.go deleted file mode 100644 index 3af30bce..00000000 --- 
a/vendor/github.com/opencontainers/runc/libcontainer/container_linux_test.go +++ /dev/null @@ -1,218 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "os" - "testing" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" -) - -type mockCgroupManager struct { - pids []int - allPids []int - stats *cgroups.Stats - paths map[string]string -} - -func (m *mockCgroupManager) GetPids() ([]int, error) { - return m.pids, nil -} - -func (m *mockCgroupManager) GetAllPids() ([]int, error) { - return m.allPids, nil -} - -func (m *mockCgroupManager) GetStats() (*cgroups.Stats, error) { - return m.stats, nil -} - -func (m *mockCgroupManager) Apply(pid int) error { - return nil -} - -func (m *mockCgroupManager) Set(container *configs.Config) error { - return nil -} - -func (m *mockCgroupManager) Destroy() error { - return nil -} - -func (m *mockCgroupManager) GetPaths() map[string]string { - return m.paths -} - -func (m *mockCgroupManager) Freeze(state configs.FreezerState) error { - return nil -} - -type mockProcess struct { - _pid int - started string -} - -func (m *mockProcess) terminate() error { - return nil -} - -func (m *mockProcess) pid() int { - return m._pid -} - -func (m *mockProcess) startTime() (string, error) { - return m.started, nil -} - -func (m *mockProcess) start() error { - return nil -} - -func (m *mockProcess) wait() (*os.ProcessState, error) { - return nil, nil -} - -func (m *mockProcess) signal(_ os.Signal) error { - return nil -} - -func (p *mockProcess) externalDescriptors() []string { - return []string{} -} - -func (p *mockProcess) setExternalDescriptors(newFds []string) { -} - -func TestGetContainerPids(t *testing.T) { - container := &linuxContainer{ - id: "myid", - config: &configs.Config{}, - cgroupManager: &mockCgroupManager{allPids: []int{1, 2, 3}}, - } - pids, err := container.Processes() - if err != nil { - t.Fatal(err) - } - for i, expected := range []int{1, 2, 3} { - if pids[i] != expected { - t.Fatalf("expected pid %d but received %d", expected, pids[i]) - } - } -} - -func TestGetContainerStats(t *testing.T) { - container := &linuxContainer{ - id: "myid", - config: &configs.Config{}, - cgroupManager: &mockCgroupManager{ - pids: []int{1, 2, 3}, - stats: &cgroups.Stats{ - MemoryStats: cgroups.MemoryStats{ - Usage: cgroups.MemoryData{ - Usage: 1024, - }, - }, - }, - }, - } - stats, err := container.Stats() - if err != nil { - t.Fatal(err) - } - if stats.CgroupStats == nil { - t.Fatal("cgroup stats are nil") - } - if stats.CgroupStats.MemoryStats.Usage.Usage != 1024 { - t.Fatalf("expected memory usage 1024 but recevied %d", stats.CgroupStats.MemoryStats.Usage.Usage) - } -} - -func TestGetContainerState(t *testing.T) { - var ( - pid = os.Getpid() - expectedMemoryPath = "/sys/fs/cgroup/memory/myid" - expectedNetworkPath = "/networks/fd" - ) - container := &linuxContainer{ - id: "myid", - config: &configs.Config{ - Namespaces: []configs.Namespace{ - {Type: configs.NEWPID}, - {Type: configs.NEWNS}, - {Type: configs.NEWNET, Path: expectedNetworkPath}, - {Type: configs.NEWUTS}, - // emulate host for IPC - //{Type: configs.NEWIPC}, - }, - }, - initProcess: &mockProcess{ - _pid: pid, - started: "010", - }, - cgroupManager: &mockCgroupManager{ - pids: []int{1, 2, 3}, - stats: &cgroups.Stats{ - MemoryStats: cgroups.MemoryStats{ - Usage: cgroups.MemoryData{ - Usage: 1024, - }, - }, - }, - paths: map[string]string{ - "memory": expectedMemoryPath, - }, - }, - } - container.state = &createdState{c: 
container} - state, err := container.State() - if err != nil { - t.Fatal(err) - } - if state.InitProcessPid != pid { - t.Fatalf("expected pid %d but received %d", pid, state.InitProcessPid) - } - if state.InitProcessStartTime != "010" { - t.Fatalf("expected process start time 010 but received %s", state.InitProcessStartTime) - } - paths := state.CgroupPaths - if paths == nil { - t.Fatal("cgroup paths should not be nil") - } - if memPath := paths["memory"]; memPath != expectedMemoryPath { - t.Fatalf("expected memory path %q but received %q", expectedMemoryPath, memPath) - } - for _, ns := range container.config.Namespaces { - path := state.NamespacePaths[ns.Type] - if path == "" { - t.Fatalf("expected non nil namespace path for %s", ns.Type) - } - if ns.Type == configs.NEWNET { - if path != expectedNetworkPath { - t.Fatalf("expected path %q but received %q", expectedNetworkPath, path) - } - } else { - file := "" - switch ns.Type { - case configs.NEWNET: - file = "net" - case configs.NEWNS: - file = "mnt" - case configs.NEWPID: - file = "pid" - case configs.NEWIPC: - file = "ipc" - case configs.NEWUSER: - file = "user" - case configs.NEWUTS: - file = "uts" - } - expected := fmt.Sprintf("/proc/%d/ns/%s", pid, file) - if expected != path { - t.Fatalf("expected path %q but received %q", expected, path) - } - } - } -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/container_windows.go deleted file mode 100644 index bb84ff74..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/container_windows.go +++ /dev/null @@ -1,20 +0,0 @@ -package libcontainer - -// State represents a running container's state -type State struct { - BaseState - - // Platform specific fields below here -} - -// A libcontainer container object. -// -// Each container is thread-safe within the same process. Since a container can -// be destroyed by a separate process, any function may return that the container -// was not found. 
-type Container interface { - BaseContainer - - // Methods below here are platform specific - -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_unix.go deleted file mode 100644 index 13323858..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_unix.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build linux freebsd - -package libcontainer - -// cgroup restoring strategy provided by criu -type cg_mode uint32 - -const ( - CRIU_CG_MODE_SOFT cg_mode = 3 + iota // restore cgroup properties if only dir created by criu - CRIU_CG_MODE_FULL // always restore all cgroups and their properties - CRIU_CG_MODE_STRICT // restore all, requiring them to not present in the system - CRIU_CG_MODE_DEFAULT // the same as CRIU_CG_MODE_SOFT -) - -type CriuPageServerInfo struct { - Address string // IP address of CRIU page server - Port int32 // port number of CRIU page server -} - -type VethPairName struct { - ContainerInterfaceName string - HostInterfaceName string -} - -type CriuOpts struct { - ImagesDirectory string // directory for storing image files - WorkDirectory string // directory to cd and write logs/pidfiles/stats to - LeaveRunning bool // leave container in running state after checkpoint - TcpEstablished bool // checkpoint/restore established TCP connections - ExternalUnixConnections bool // allow external unix connections - ShellJob bool // allow to dump and restore shell jobs - FileLocks bool // handle file locks, for safety - PageServer CriuPageServerInfo // allow to dump to criu page server - VethPairs []VethPairName // pass the veth to criu when restore - ManageCgroupsMode cg_mode // dump or restore cgroup mode - EmptyNs uint32 // don't c/r properties for namespace from this mask -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go deleted file mode 100644 index bc920770..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package libcontainer - -// TODO Windows: This can ultimately be entirely factored out as criu is -// a Unix concept not relevant on Windows. -type CriuOpts struct { -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/error.go b/vendor/github.com/opencontainers/runc/libcontainer/error.go deleted file mode 100644 index b50aaae8..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/error.go +++ /dev/null @@ -1,70 +0,0 @@ -package libcontainer - -import "io" - -// API error code type. -type ErrorCode int - -// API error codes. 
-const ( - // Factory errors - IdInUse ErrorCode = iota - InvalidIdFormat - - // Container errors - ContainerNotExists - ContainerPaused - ContainerNotStopped - ContainerNotRunning - ContainerNotPaused - - // Process errors - NoProcessOps - - // Common errors - ConfigInvalid - ConsoleExists - SystemError -) - -func (c ErrorCode) String() string { - switch c { - case IdInUse: - return "Id already in use" - case InvalidIdFormat: - return "Invalid format" - case ContainerPaused: - return "Container paused" - case ConfigInvalid: - return "Invalid configuration" - case SystemError: - return "System error" - case ContainerNotExists: - return "Container does not exist" - case ContainerNotStopped: - return "Container is not stopped" - case ContainerNotRunning: - return "Container is not running" - case ConsoleExists: - return "Console exists for process" - case ContainerNotPaused: - return "Container is not paused" - case NoProcessOps: - return "No process operations" - default: - return "Unknown error" - } -} - -// API Error type. -type Error interface { - error - - // Returns a verbose string including the error message - // and a representation of the stack trace suitable for - // printing. - Detail(w io.Writer) error - - // Returns the error code for this error. - Code() ErrorCode -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/error_test.go b/vendor/github.com/opencontainers/runc/libcontainer/error_test.go deleted file mode 100644 index 4bf4c9f5..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/error_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package libcontainer - -import "testing" - -func TestErrorCode(t *testing.T) { - codes := map[ErrorCode]string{ - IdInUse: "Id already in use", - InvalidIdFormat: "Invalid format", - ContainerPaused: "Container paused", - ConfigInvalid: "Invalid configuration", - SystemError: "System error", - ContainerNotExists: "Container does not exist", - } - - for code, expected := range codes { - if actual := code.String(); actual != expected { - t.Fatalf("expected string %q but received %q", expected, actual) - } - } -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/factory.go b/vendor/github.com/opencontainers/runc/libcontainer/factory.go deleted file mode 100644 index f0ccb52e..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/factory.go +++ /dev/null @@ -1,45 +0,0 @@ -package libcontainer - -import ( - "github.com/opencontainers/runc/libcontainer/configs" -) - -type Factory interface { - // Creates a new container with the given id and starts the initial process inside it. - // id must be a string containing only letters, digits and underscores and must contain - // between 1 and 1024 characters, inclusive. - // - // The id must not already be in use by an existing container. Containers created using - // a factory with the same path (and file system) must have distinct ids. - // - // Returns the new container with a running process. - // - // errors: - // IdInUse - id is already in use by a container - // InvalidIdFormat - id has incorrect format - // ConfigInvalid - config is invalid - // Systemerror - System error - // - // On error, any partially created container parts are cleaned up (the operation is atomic). - Create(id string, config *configs.Config) (Container, error) - - // Load takes an ID for an existing container and returns the container information - // from the state. This presents a read only view of the container. 
- // - // errors: - // Path does not exist - // Container is stopped - // System error - Load(id string) (Container, error) - - // StartInitialization is an internal API to libcontainer used during the reexec of the - // container. - // - // Errors: - // Pipe connection error - // System error - StartInitialization() error - - // Type returns info string about factory type (e.g. lxc, libcontainer...) - Type() string -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go deleted file mode 100644 index e67b001f..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go +++ /dev/null @@ -1,290 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "encoding/json" - "fmt" - "os" - "os/exec" - "path/filepath" - "regexp" - "runtime/debug" - "strconv" - "syscall" - - "github.com/docker/docker/pkg/mount" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fs" - "github.com/opencontainers/runc/libcontainer/cgroups/systemd" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/configs/validate" - "github.com/opencontainers/runc/libcontainer/utils" -) - -const ( - stateFilename = "state.json" -) - -var ( - idRegex = regexp.MustCompile(`^[\w-\.]+$`) - maxIdLen = 1024 -) - -// InitArgs returns an options func to configure a LinuxFactory with the -// provided init arguments. -func InitArgs(args ...string) func(*LinuxFactory) error { - return func(l *LinuxFactory) error { - name := args[0] - if filepath.Base(name) == name { - if lp, err := exec.LookPath(name); err == nil { - name = lp - } - } else { - abs, err := filepath.Abs(name) - if err != nil { - return err - } - name = abs - } - l.InitPath = "/proc/self/exe" - l.InitArgs = append([]string{name}, args[1:]...) - return nil - } -} - -// InitPath returns an options func to configure a LinuxFactory with the -// provided absolute path to the init binary and arguements. -func InitPath(path string, args ...string) func(*LinuxFactory) error { - return func(l *LinuxFactory) error { - l.InitPath = path - l.InitArgs = args - return nil - } -} - -// SystemdCgroups is an options func to configure a LinuxFactory to return -// containers that use systemd to create and manage cgroups. -func SystemdCgroups(l *LinuxFactory) error { - l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { - return &systemd.Manager{ - Cgroups: config, - Paths: paths, - } - } - return nil -} - -// Cgroupfs is an options func to configure a LinuxFactory to return -// containers that use the native cgroups filesystem implementation to -// create and manage cgroups. -func Cgroupfs(l *LinuxFactory) error { - l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { - return &fs.Manager{ - Cgroups: config, - Paths: paths, - } - } - return nil -} - -// TmpfsRoot is an option func to mount LinuxFactory.Root to tmpfs. -func TmpfsRoot(l *LinuxFactory) error { - mounted, err := mount.Mounted(l.Root) - if err != nil { - return err - } - if !mounted { - if err := syscall.Mount("tmpfs", l.Root, "tmpfs", 0, ""); err != nil { - return err - } - } - return nil -} - -// New returns a linux based container factory based in the root directory and -// configures the factory with the provided option funcs. 
-func New(root string, options ...func(*LinuxFactory) error) (Factory, error) { - if root != "" { - if err := os.MkdirAll(root, 0700); err != nil { - return nil, newGenericError(err, SystemError) - } - } - l := &LinuxFactory{ - Root: root, - Validator: validate.New(), - CriuPath: "criu", - } - InitArgs(os.Args[0], "init")(l) - Cgroupfs(l) - for _, opt := range options { - if err := opt(l); err != nil { - return nil, err - } - } - return l, nil -} - -// LinuxFactory implements the default factory interface for linux based systems. -type LinuxFactory struct { - // Root directory for the factory to store state. - Root string - - // InitPath is the absolute path to the init binary. - InitPath string - - // InitArgs are arguments for calling the init responsibilities for spawning - // a container. - InitArgs []string - - // CriuPath is the path to the criu binary used for checkpoint and restore of - // containers. - CriuPath string - - // Validator provides validation to container configurations. - Validator validate.Validator - - // NewCgroupsManager returns an initialized cgroups manager for a single container. - NewCgroupsManager func(config *configs.Cgroup, paths map[string]string) cgroups.Manager -} - -func (l *LinuxFactory) Create(id string, config *configs.Config) (Container, error) { - if l.Root == "" { - return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid) - } - if err := l.validateID(id); err != nil { - return nil, err - } - if err := l.Validator.Validate(config); err != nil { - return nil, newGenericError(err, ConfigInvalid) - } - containerRoot := filepath.Join(l.Root, id) - if _, err := os.Stat(containerRoot); err == nil { - return nil, newGenericError(fmt.Errorf("container with id exists: %v", id), IdInUse) - } else if !os.IsNotExist(err) { - return nil, newGenericError(err, SystemError) - } - if err := os.MkdirAll(containerRoot, 0700); err != nil { - return nil, newGenericError(err, SystemError) - } - c := &linuxContainer{ - id: id, - root: containerRoot, - config: config, - initPath: l.InitPath, - initArgs: l.InitArgs, - criuPath: l.CriuPath, - cgroupManager: l.NewCgroupsManager(config.Cgroups, nil), - } - c.state = &stoppedState{c: c} - return c, nil -} - -func (l *LinuxFactory) Load(id string) (Container, error) { - if l.Root == "" { - return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid) - } - containerRoot := filepath.Join(l.Root, id) - state, err := l.loadState(containerRoot) - if err != nil { - return nil, err - } - r := &nonChildProcess{ - processPid: state.InitProcessPid, - processStartTime: state.InitProcessStartTime, - fds: state.ExternalDescriptors, - } - c := &linuxContainer{ - initProcess: r, - id: id, - config: &state.Config, - initPath: l.InitPath, - initArgs: l.InitArgs, - criuPath: l.CriuPath, - cgroupManager: l.NewCgroupsManager(state.Config.Cgroups, state.CgroupPaths), - root: containerRoot, - created: state.Created, - } - c.state = &createdState{c: c, s: Created} - if err := c.refreshState(); err != nil { - return nil, err - } - return c, nil -} - -func (l *LinuxFactory) Type() string { - return "libcontainer" -} - -// StartInitialization loads a container by opening the pipe fd from the parent to read the configuration and state -// This is a low level implementation detail of the reexec and should not be consumed externally -func (l *LinuxFactory) StartInitialization() (err error) { - fdStr := os.Getenv("_LIBCONTAINER_INITPIPE") - pipefd, err := strconv.Atoi(fdStr) - if err != nil { - return fmt.Errorf("error converting env 
var _LIBCONTAINER_INITPIPE(%q) to an int: %s", fdStr, err) - } - var ( - pipe = os.NewFile(uintptr(pipefd), "pipe") - it = initType(os.Getenv("_LIBCONTAINER_INITTYPE")) - ) - // clear the current process's environment to clean any libcontainer - // specific env vars. - os.Clearenv() - var i initer - defer func() { - // We have an error during the initialization of the container's init, - // send it back to the parent process in the form of an initError. - // If container's init successed, syscall.Exec will not return, hence - // this defer function will never be called. - if _, ok := i.(*linuxStandardInit); ok { - // Synchronisation only necessary for standard init. - if err := utils.WriteJSON(pipe, syncT{procError}); err != nil { - panic(err) - } - } - if err := utils.WriteJSON(pipe, newSystemError(err)); err != nil { - panic(err) - } - // ensure that this pipe is always closed - pipe.Close() - }() - - defer func() { - if e := recover(); e != nil { - err = fmt.Errorf("panic from initialization: %v, %v", e, string(debug.Stack())) - } - }() - - i, err = newContainerInit(it, pipe) - if err != nil { - return err - } - return i.Init() -} - -func (l *LinuxFactory) loadState(root string) (*State, error) { - f, err := os.Open(filepath.Join(root, stateFilename)) - if err != nil { - if os.IsNotExist(err) { - return nil, newGenericError(err, ContainerNotExists) - } - return nil, newGenericError(err, SystemError) - } - defer f.Close() - var state *State - if err := json.NewDecoder(f).Decode(&state); err != nil { - return nil, newGenericError(err, SystemError) - } - return state, nil -} - -func (l *LinuxFactory) validateID(id string) error { - if !idRegex.MatchString(id) { - return newGenericError(fmt.Errorf("invalid id format: %v", id), InvalidIdFormat) - } - if len(id) > maxIdLen { - return newGenericError(fmt.Errorf("invalid id format: %v", id), InvalidIdFormat) - } - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/factory_linux_test.go b/vendor/github.com/opencontainers/runc/libcontainer/factory_linux_test.go deleted file mode 100644 index ea3b5132..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/factory_linux_test.go +++ /dev/null @@ -1,207 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "io/ioutil" - "os" - "path/filepath" - "reflect" - "syscall" - "testing" - - "github.com/docker/docker/pkg/mount" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/utils" -) - -func newTestRoot() (string, error) { - dir, err := ioutil.TempDir("", "libcontainer") - if err != nil { - return "", err - } - return dir, nil -} - -func TestFactoryNew(t *testing.T) { - root, rerr := newTestRoot() - if rerr != nil { - t.Fatal(rerr) - } - defer os.RemoveAll(root) - factory, err := New(root, Cgroupfs) - if err != nil { - t.Fatal(err) - } - if factory == nil { - t.Fatal("factory should not be nil") - } - lfactory, ok := factory.(*LinuxFactory) - if !ok { - t.Fatal("expected linux factory returned on linux based systems") - } - if lfactory.Root != root { - t.Fatalf("expected factory root to be %q but received %q", root, lfactory.Root) - } - - if factory.Type() != "libcontainer" { - t.Fatalf("unexpected factory type: %q, expected %q", factory.Type(), "libcontainer") - } -} - -func TestFactoryNewTmpfs(t *testing.T) { - root, rerr := newTestRoot() - if rerr != nil { - t.Fatal(rerr) - } - defer os.RemoveAll(root) - factory, err := New(root, Cgroupfs, TmpfsRoot) - if err != nil { - t.Fatal(err) - } - if factory == 
nil { - t.Fatal("factory should not be nil") - } - lfactory, ok := factory.(*LinuxFactory) - if !ok { - t.Fatal("expected linux factory returned on linux based systems") - } - if lfactory.Root != root { - t.Fatalf("expected factory root to be %q but received %q", root, lfactory.Root) - } - - if factory.Type() != "libcontainer" { - t.Fatalf("unexpected factory type: %q, expected %q", factory.Type(), "libcontainer") - } - mounted, err := mount.Mounted(lfactory.Root) - if err != nil { - t.Fatal(err) - } - if !mounted { - t.Fatalf("Factory Root is not mounted") - } - mounts, err := mount.GetMounts() - if err != nil { - t.Fatal(err) - } - var found bool - for _, m := range mounts { - if m.Mountpoint == lfactory.Root { - if m.Fstype != "tmpfs" { - t.Fatalf("Fstype of root: %s, expected %s", m.Fstype, "tmpfs") - } - if m.Source != "tmpfs" { - t.Fatalf("Source of root: %s, expected %s", m.Source, "tmpfs") - } - found = true - } - } - if !found { - t.Fatalf("Factory Root is not listed in mounts list") - } - defer syscall.Unmount(root, syscall.MNT_DETACH) -} - -func TestFactoryLoadNotExists(t *testing.T) { - root, rerr := newTestRoot() - if rerr != nil { - t.Fatal(rerr) - } - defer os.RemoveAll(root) - factory, err := New(root, Cgroupfs) - if err != nil { - t.Fatal(err) - } - _, err = factory.Load("nocontainer") - if err == nil { - t.Fatal("expected nil error loading non-existing container") - } - lerr, ok := err.(Error) - if !ok { - t.Fatal("expected libcontainer error type") - } - if lerr.Code() != ContainerNotExists { - t.Fatalf("expected error code %s but received %s", ContainerNotExists, lerr.Code()) - } -} - -func TestFactoryLoadContainer(t *testing.T) { - root, err := newTestRoot() - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(root) - // setup default container config and state for mocking - var ( - id = "1" - expectedHooks = &configs.Hooks{ - Prestart: []configs.Hook{ - configs.CommandHook{Command: configs.Command{Path: "prestart-hook"}}, - }, - Poststart: []configs.Hook{ - configs.CommandHook{Command: configs.Command{Path: "poststart-hook"}}, - }, - Poststop: []configs.Hook{ - unserializableHook{}, - configs.CommandHook{Command: configs.Command{Path: "poststop-hook"}}, - }, - } - expectedConfig = &configs.Config{ - Rootfs: "/mycontainer/root", - Hooks: expectedHooks, - } - expectedState = &State{ - BaseState: BaseState{ - InitProcessPid: 1024, - Config: *expectedConfig, - }, - } - ) - if err := os.Mkdir(filepath.Join(root, id), 0700); err != nil { - t.Fatal(err) - } - if err := marshal(filepath.Join(root, id, stateFilename), expectedState); err != nil { - t.Fatal(err) - } - factory, err := New(root, Cgroupfs) - if err != nil { - t.Fatal(err) - } - container, err := factory.Load(id) - if err != nil { - t.Fatal(err) - } - if container.ID() != id { - t.Fatalf("expected container id %q but received %q", id, container.ID()) - } - config := container.Config() - if config.Rootfs != expectedConfig.Rootfs { - t.Fatalf("expected rootfs %q but received %q", expectedConfig.Rootfs, config.Rootfs) - } - expectedHooks.Poststop = expectedHooks.Poststop[1:] // expect unserializable hook to be skipped - if !reflect.DeepEqual(config.Hooks, expectedHooks) { - t.Fatalf("expects hooks %q but received %q", expectedHooks, config.Hooks) - } - lcontainer, ok := container.(*linuxContainer) - if !ok { - t.Fatal("expected linux container on linux based systems") - } - if lcontainer.initProcess.pid() != expectedState.InitProcessPid { - t.Fatalf("expected init pid %d but received %d", 
expectedState.InitProcessPid, lcontainer.initProcess.pid()) - } -} - -func marshal(path string, v interface{}) error { - f, err := os.Create(path) - if err != nil { - return err - } - defer f.Close() - return utils.WriteJSON(f, v) -} - -type unserializableHook struct{} - -func (unserializableHook) Run(configs.HookState) error { - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/generic_error.go b/vendor/github.com/opencontainers/runc/libcontainer/generic_error.go deleted file mode 100644 index 3ed33da6..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/generic_error.go +++ /dev/null @@ -1,87 +0,0 @@ -package libcontainer - -import ( - "io" - "text/template" - "time" - - "github.com/opencontainers/runc/libcontainer/stacktrace" -) - -type syncType uint8 - -const ( - procReady syncType = iota - procError - procRun - procHooks - procResume -) - -type syncT struct { - Type syncType `json:"type"` -} - -var errorTemplate = template.Must(template.New("error").Parse(`Timestamp: {{.Timestamp}} -Code: {{.ECode}} -{{if .Message }} -Message: {{.Message}} -{{end}} -Frames:{{range $i, $frame := .Stack.Frames}} ---- -{{$i}}: {{$frame.Function}} -Package: {{$frame.Package}} -File: {{$frame.File}}@{{$frame.Line}}{{end}} -`)) - -func newGenericError(err error, c ErrorCode) Error { - if le, ok := err.(Error); ok { - return le - } - gerr := &genericError{ - Timestamp: time.Now(), - Err: err, - ECode: c, - Stack: stacktrace.Capture(1), - } - if err != nil { - gerr.Message = err.Error() - } - return gerr -} - -func newSystemError(err error) Error { - if le, ok := err.(Error); ok { - return le - } - gerr := &genericError{ - Timestamp: time.Now(), - Err: err, - ECode: SystemError, - Stack: stacktrace.Capture(1), - } - if err != nil { - gerr.Message = err.Error() - } - return gerr -} - -type genericError struct { - Timestamp time.Time - ECode ErrorCode - Err error `json:"-"` - Message string - Stack stacktrace.Stacktrace -} - -func (e *genericError) Error() string { - return e.Message -} - -func (e *genericError) Code() ErrorCode { - return e.ECode -} - -func (e *genericError) Detail(w io.Writer) error { - return errorTemplate.Execute(w, e) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/generic_error_test.go b/vendor/github.com/opencontainers/runc/libcontainer/generic_error_test.go deleted file mode 100644 index 292d2a36..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/generic_error_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package libcontainer - -import ( - "fmt" - "io/ioutil" - "testing" -) - -func TestErrorDetail(t *testing.T) { - err := newGenericError(fmt.Errorf("test error"), SystemError) - if derr := err.Detail(ioutil.Discard); derr != nil { - t.Fatal(derr) - } -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go deleted file mode 100644 index 0bde656e..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go +++ /dev/null @@ -1,367 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "strconv" - "strings" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/system" - "github.com/opencontainers/runc/libcontainer/user" - "github.com/opencontainers/runc/libcontainer/utils" - 
"github.com/vishvananda/netlink" -) - -type initType string - -const ( - initSetns initType = "setns" - initStandard initType = "standard" -) - -type pid struct { - Pid int `json:"pid"` -} - -// network is an internal struct used to setup container networks. -type network struct { - configs.Network - - // TempVethPeerName is a unique temporary veth peer name that was placed into - // the container's namespace. - TempVethPeerName string `json:"temp_veth_peer_name"` -} - -// initConfig is used for transferring parameters from Exec() to Init() -type initConfig struct { - Args []string `json:"args"` - Env []string `json:"env"` - Cwd string `json:"cwd"` - Capabilities []string `json:"capabilities"` - ProcessLabel string `json:"process_label"` - AppArmorProfile string `json:"apparmor_profile"` - NoNewPrivileges bool `json:"no_new_privileges"` - User string `json:"user"` - Config *configs.Config `json:"config"` - Console string `json:"console"` - Networks []*network `json:"network"` - PassedFilesCount int `json:"passed_files_count"` - ContainerId string `json:"containerid"` - Rlimits []configs.Rlimit `json:"rlimits"` -} - -type initer interface { - Init() error -} - -func newContainerInit(t initType, pipe *os.File) (initer, error) { - var config *initConfig - if err := json.NewDecoder(pipe).Decode(&config); err != nil { - return nil, err - } - if err := populateProcessEnvironment(config.Env); err != nil { - return nil, err - } - switch t { - case initSetns: - return &linuxSetnsInit{ - config: config, - }, nil - case initStandard: - return &linuxStandardInit{ - pipe: pipe, - parentPid: syscall.Getppid(), - config: config, - }, nil - } - return nil, fmt.Errorf("unknown init type %q", t) -} - -// populateProcessEnvironment loads the provided environment variables into the -// current processes's environment. -func populateProcessEnvironment(env []string) error { - for _, pair := range env { - p := strings.SplitN(pair, "=", 2) - if len(p) < 2 { - return fmt.Errorf("invalid environment '%v'", pair) - } - if err := os.Setenv(p[0], p[1]); err != nil { - return err - } - } - return nil -} - -// finalizeNamespace drops the caps, sets the correct user -// and working dir, and closes any leaked file descriptors -// before executing the command inside the namespace -func finalizeNamespace(config *initConfig) error { - // Ensure that all unwanted fds we may have accidentally - // inherited are marked close-on-exec so they stay out of the - // container - if err := utils.CloseExecFrom(config.PassedFilesCount + 3); err != nil { - return err - } - - capabilities := config.Config.Capabilities - if config.Capabilities != nil { - capabilities = config.Capabilities - } - w, err := newCapWhitelist(capabilities) - if err != nil { - return err - } - // drop capabilities in bounding set before changing user - if err := w.dropBoundingSet(); err != nil { - return err - } - // preserve existing capabilities while we change users - if err := system.SetKeepCaps(); err != nil { - return err - } - if err := setupUser(config); err != nil { - return err - } - if err := system.ClearKeepCaps(); err != nil { - return err - } - // drop all other capabilities - if err := w.drop(); err != nil { - return err - } - if config.Cwd != "" { - if err := syscall.Chdir(config.Cwd); err != nil { - return err - } - } - return nil -} - -// syncParentReady sends to the given pipe a JSON payload which indicates that -// the init is ready to Exec the child process. It then waits for the parent to -// indicate that it is cleared to Exec. 
-func syncParentReady(pipe io.ReadWriter) error { - // Tell parent. - if err := utils.WriteJSON(pipe, syncT{procReady}); err != nil { - return err - } - // Wait for parent to give the all-clear. - var procSync syncT - if err := json.NewDecoder(pipe).Decode(&procSync); err != nil { - if err == io.EOF { - return fmt.Errorf("parent closed synchronisation channel") - } - if procSync.Type != procRun { - return fmt.Errorf("invalid synchronisation flag from parent") - } - } - return nil -} - -// syncParentHooks sends to the given pipe a JSON payload which indicates that -// the parent should execute pre-start hooks. It then waits for the parent to -// indicate that it is cleared to resume. -func syncParentHooks(pipe io.ReadWriter) error { - // Tell parent. - if err := utils.WriteJSON(pipe, syncT{procHooks}); err != nil { - return err - } - // Wait for parent to give the all-clear. - var procSync syncT - if err := json.NewDecoder(pipe).Decode(&procSync); err != nil { - if err == io.EOF { - return fmt.Errorf("parent closed synchronisation channel") - } - if procSync.Type != procResume { - return fmt.Errorf("invalid synchronisation flag from parent") - } - } - return nil -} - -// setupUser changes the groups, gid, and uid for the user inside the container -func setupUser(config *initConfig) error { - // Set up defaults. - defaultExecUser := user.ExecUser{ - Uid: syscall.Getuid(), - Gid: syscall.Getgid(), - Home: "/", - } - passwdPath, err := user.GetPasswdPath() - if err != nil { - return err - } - groupPath, err := user.GetGroupPath() - if err != nil { - return err - } - execUser, err := user.GetExecUserPath(config.User, &defaultExecUser, passwdPath, groupPath) - if err != nil { - return err - } - - var addGroups []int - if len(config.Config.AdditionalGroups) > 0 { - addGroups, err = user.GetAdditionalGroupsPath(config.Config.AdditionalGroups, groupPath) - if err != nil { - return err - } - } - // before we change to the container's user make sure that the processes STDIO - // is correctly owned by the user that we are switching to. - if err := fixStdioPermissions(execUser); err != nil { - return err - } - suppGroups := append(execUser.Sgids, addGroups...) - if err := syscall.Setgroups(suppGroups); err != nil { - return err - } - - if err := system.Setgid(execUser.Gid); err != nil { - return err - } - if err := system.Setuid(execUser.Uid); err != nil { - return err - } - // if we didn't get HOME already, set it based on the user's HOME - if envHome := os.Getenv("HOME"); envHome == "" { - if err := os.Setenv("HOME", execUser.Home); err != nil { - return err - } - } - return nil -} - -// fixStdioPermissions fixes the permissions of PID 1's STDIO within the container to the specified user. -// The ownership needs to match because it is created outside of the container and needs to be -// localized. -func fixStdioPermissions(u *user.ExecUser) error { - var null syscall.Stat_t - if err := syscall.Stat("/dev/null", &null); err != nil { - return err - } - for _, fd := range []uintptr{ - os.Stdin.Fd(), - os.Stderr.Fd(), - os.Stdout.Fd(), - } { - var s syscall.Stat_t - if err := syscall.Fstat(int(fd), &s); err != nil { - return err - } - // skip chown of /dev/null if it was used as one of the STDIO fds. - if s.Rdev == null.Rdev { - continue - } - if err := syscall.Fchown(int(fd), u.Uid, u.Gid); err != nil { - return err - } - } - return nil -} - -// setupNetwork sets up and initializes any network interface inside the container. 
-func setupNetwork(config *initConfig) error { - for _, config := range config.Networks { - strategy, err := getStrategy(config.Type) - if err != nil { - return err - } - if err := strategy.initialize(config); err != nil { - return err - } - } - return nil -} - -func setupRoute(config *configs.Config) error { - for _, config := range config.Routes { - _, dst, err := net.ParseCIDR(config.Destination) - if err != nil { - return err - } - src := net.ParseIP(config.Source) - if src == nil { - return fmt.Errorf("Invalid source for route: %s", config.Source) - } - gw := net.ParseIP(config.Gateway) - if gw == nil { - return fmt.Errorf("Invalid gateway for route: %s", config.Gateway) - } - l, err := netlink.LinkByName(config.InterfaceName) - if err != nil { - return err - } - route := &netlink.Route{ - Scope: netlink.SCOPE_UNIVERSE, - Dst: dst, - Src: src, - Gw: gw, - LinkIndex: l.Attrs().Index, - } - if err := netlink.RouteAdd(route); err != nil { - return err - } - } - return nil -} - -func setupRlimits(limits []configs.Rlimit, pid int) error { - for _, rlimit := range limits { - if err := system.Prlimit(pid, rlimit.Type, syscall.Rlimit{Max: rlimit.Hard, Cur: rlimit.Soft}); err != nil { - return fmt.Errorf("error setting rlimit type %v: %v", rlimit.Type, err) - } - } - return nil -} - -func setOomScoreAdj(oomScoreAdj int, pid int) error { - path := fmt.Sprintf("/proc/%d/oom_score_adj", pid) - - return ioutil.WriteFile(path, []byte(strconv.Itoa(oomScoreAdj)), 0600) -} - -// killCgroupProcesses freezes then iterates over all the processes inside the -// manager's cgroups sending a SIGKILL to each process then waiting for them to -// exit. -func killCgroupProcesses(m cgroups.Manager) error { - var procs []*os.Process - if err := m.Freeze(configs.Frozen); err != nil { - logrus.Warn(err) - } - pids, err := m.GetAllPids() - if err != nil { - m.Freeze(configs.Thawed) - return err - } - for _, pid := range pids { - p, err := os.FindProcess(pid) - if err != nil { - logrus.Warn(err) - continue - } - procs = append(procs, p) - if err := p.Kill(); err != nil { - logrus.Warn(err) - } - } - if err := m.Freeze(configs.Thawed); err != nil { - logrus.Warn(err) - } - for _, p := range procs { - if _, err := p.Wait(); err != nil { - logrus.Warn(err) - } - } - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/message_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/message_linux.go deleted file mode 100644 index 16630133..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/message_linux.go +++ /dev/null @@ -1,88 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "syscall" - - "github.com/vishvananda/netlink/nl" -) - -// list of known message types we want to send to bootstrap program -// The number is randomly chosen to not conflict with known netlink types -const ( - InitMsg uint16 = 62000 - CloneFlagsAttr uint16 = 27281 - ConsolePathAttr uint16 = 27282 - NsPathsAttr uint16 = 27283 - UidmapAttr uint16 = 27284 - GidmapAttr uint16 = 27285 - SetgroupAttr uint16 = 27286 - // When syscall.NLA_HDRLEN is in gccgo, take this out. 
- syscall_NLA_HDRLEN = (syscall.SizeofNlAttr + syscall.NLA_ALIGNTO - 1) & ^(syscall.NLA_ALIGNTO - 1) -) - -type Int32msg struct { - Type uint16 - Value uint32 -} - -// int32msg has the following representation -// | nlattr len | nlattr type | -// | uint32 value | -func (msg *Int32msg) Serialize() []byte { - buf := make([]byte, msg.Len()) - native := nl.NativeEndian() - native.PutUint16(buf[0:2], uint16(msg.Len())) - native.PutUint16(buf[2:4], msg.Type) - native.PutUint32(buf[4:8], msg.Value) - return buf -} - -func (msg *Int32msg) Len() int { - return syscall_NLA_HDRLEN + 4 -} - -// bytemsg has the following representation -// | nlattr len | nlattr type | -// | value | pad | -type Bytemsg struct { - Type uint16 - Value []byte -} - -func (msg *Bytemsg) Serialize() []byte { - l := msg.Len() - buf := make([]byte, (l+syscall.NLA_ALIGNTO-1) & ^(syscall.NLA_ALIGNTO-1)) - native := nl.NativeEndian() - native.PutUint16(buf[0:2], uint16(l)) - native.PutUint16(buf[2:4], msg.Type) - copy(buf[4:], msg.Value) - return buf -} - -func (msg *Bytemsg) Len() int { - return syscall_NLA_HDRLEN + len(msg.Value) + 1 // null-terminated -} - -type Boolmsg struct { - Type uint16 - Value bool -} - -func (msg *Boolmsg) Serialize() []byte { - buf := make([]byte, msg.Len()) - native := nl.NativeEndian() - native.PutUint16(buf[0:2], uint16(msg.Len())) - native.PutUint16(buf[2:4], msg.Type) - if msg.Value { - buf[4] = 1 - } else { - buf[4] = 0 - } - return buf -} - -func (msg *Boolmsg) Len() int { - return syscall_NLA_HDRLEN + 1 -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/network_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/network_linux.go deleted file mode 100644 index 5075bee4..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/network_linux.go +++ /dev/null @@ -1,259 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "io/ioutil" - "net" - "path/filepath" - "strconv" - "strings" - - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/utils" - "github.com/vishvananda/netlink" -) - -var strategies = map[string]networkStrategy{ - "veth": &veth{}, - "loopback": &loopback{}, -} - -// networkStrategy represents a specific network configuration for -// a container's networking stack -type networkStrategy interface { - create(*network, int) error - initialize(*network) error - detach(*configs.Network) error - attach(*configs.Network) error -} - -// getStrategy returns the specific network strategy for the -// provided type. -func getStrategy(tpe string) (networkStrategy, error) { - s, exists := strategies[tpe] - if !exists { - return nil, fmt.Errorf("unknown strategy type %q", tpe) - } - return s, nil -} - -// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo. -func getNetworkInterfaceStats(interfaceName string) (*NetworkInterface, error) { - out := &NetworkInterface{Name: interfaceName} - // This can happen if the network runtime information is missing - possible if the - // container was created by an old version of libcontainer. - if interfaceName == "" { - return out, nil - } - type netStatsPair struct { - // Where to write the output. - Out *uint64 - // The network stats file to read. - File string - } - // Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container. 
- netStats := []netStatsPair{ - {Out: &out.RxBytes, File: "tx_bytes"}, - {Out: &out.RxPackets, File: "tx_packets"}, - {Out: &out.RxErrors, File: "tx_errors"}, - {Out: &out.RxDropped, File: "tx_dropped"}, - - {Out: &out.TxBytes, File: "rx_bytes"}, - {Out: &out.TxPackets, File: "rx_packets"}, - {Out: &out.TxErrors, File: "rx_errors"}, - {Out: &out.TxDropped, File: "rx_dropped"}, - } - for _, netStat := range netStats { - data, err := readSysfsNetworkStats(interfaceName, netStat.File) - if err != nil { - return nil, err - } - *(netStat.Out) = data - } - return out, nil -} - -// Reads the specified statistics available under /sys/class/net//statistics -func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) { - data, err := ioutil.ReadFile(filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile)) - if err != nil { - return 0, err - } - return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) -} - -// loopback is a network strategy that provides a basic loopback device -type loopback struct { -} - -func (l *loopback) create(n *network, nspid int) error { - return nil -} - -func (l *loopback) initialize(config *network) error { - return netlink.LinkSetUp(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: "lo"}}) -} - -func (l *loopback) attach(n *configs.Network) (err error) { - return nil -} - -func (l *loopback) detach(n *configs.Network) (err error) { - return nil -} - -// veth is a network strategy that uses a bridge and creates -// a veth pair, one that is attached to the bridge on the host and the other -// is placed inside the container's namespace -type veth struct { -} - -func (v *veth) detach(n *configs.Network) (err error) { - return netlink.LinkSetMaster(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: n.HostInterfaceName}}, nil) -} - -// attach a container network interface to an external network -func (v *veth) attach(n *configs.Network) (err error) { - brl, err := netlink.LinkByName(n.Bridge) - if err != nil { - return err - } - br, ok := brl.(*netlink.Bridge) - if !ok { - return fmt.Errorf("Wrong device type %T", brl) - } - host, err := netlink.LinkByName(n.HostInterfaceName) - if err != nil { - return err - } - - if err := netlink.LinkSetMaster(host, br); err != nil { - return err - } - if err := netlink.LinkSetMTU(host, n.Mtu); err != nil { - return err - } - if n.HairpinMode { - if err := netlink.LinkSetHairpin(host, true); err != nil { - return err - } - } - if err := netlink.LinkSetUp(host); err != nil { - return err - } - - return nil -} - -func (v *veth) create(n *network, nspid int) (err error) { - tmpName, err := v.generateTempPeerName() - if err != nil { - return err - } - n.TempVethPeerName = tmpName - if n.Bridge == "" { - return fmt.Errorf("bridge is not specified") - } - veth := &netlink.Veth{ - LinkAttrs: netlink.LinkAttrs{ - Name: n.HostInterfaceName, - TxQLen: n.TxQueueLen, - }, - PeerName: n.TempVethPeerName, - } - if err := netlink.LinkAdd(veth); err != nil { - return err - } - defer func() { - if err != nil { - netlink.LinkDel(veth) - } - }() - if err := v.attach(&n.Network); err != nil { - return err - } - child, err := netlink.LinkByName(n.TempVethPeerName) - if err != nil { - return err - } - return netlink.LinkSetNsPid(child, nspid) -} - -func (v *veth) generateTempPeerName() (string, error) { - return utils.GenerateRandomName("veth", 7) -} - -func (v *veth) initialize(config *network) error { - peer := config.TempVethPeerName - if peer == "" { - return fmt.Errorf("peer is not specified") - } - child, err := 
netlink.LinkByName(peer) - if err != nil { - return err - } - if err := netlink.LinkSetDown(child); err != nil { - return err - } - if err := netlink.LinkSetName(child, config.Name); err != nil { - return err - } - // get the interface again after we changed the name as the index also changes. - if child, err = netlink.LinkByName(config.Name); err != nil { - return err - } - if config.MacAddress != "" { - mac, err := net.ParseMAC(config.MacAddress) - if err != nil { - return err - } - if err := netlink.LinkSetHardwareAddr(child, mac); err != nil { - return err - } - } - ip, err := netlink.ParseAddr(config.Address) - if err != nil { - return err - } - if err := netlink.AddrAdd(child, ip); err != nil { - return err - } - if config.IPv6Address != "" { - ip6, err := netlink.ParseAddr(config.IPv6Address) - if err != nil { - return err - } - if err := netlink.AddrAdd(child, ip6); err != nil { - return err - } - } - if err := netlink.LinkSetMTU(child, config.Mtu); err != nil { - return err - } - if err := netlink.LinkSetUp(child); err != nil { - return err - } - if config.Gateway != "" { - gw := net.ParseIP(config.Gateway) - if err := netlink.RouteAdd(&netlink.Route{ - Scope: netlink.SCOPE_UNIVERSE, - LinkIndex: child.Attrs().Index, - Gw: gw, - }); err != nil { - return err - } - } - if config.IPv6Gateway != "" { - gw := net.ParseIP(config.IPv6Gateway) - if err := netlink.RouteAdd(&netlink.Route{ - Scope: netlink.SCOPE_UNIVERSE, - LinkIndex: child.Attrs().Index, - Gw: gw, - }); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/notify_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/notify_linux.go deleted file mode 100644 index 839a50c5..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/notify_linux.go +++ /dev/null @@ -1,89 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "syscall" -) - -const oomCgroupName = "memory" - -type PressureLevel uint - -const ( - LowPressure PressureLevel = iota - MediumPressure - CriticalPressure -) - -func registerMemoryEvent(cgDir string, evName string, arg string) (<-chan struct{}, error) { - evFile, err := os.Open(filepath.Join(cgDir, evName)) - if err != nil { - return nil, err - } - fd, _, syserr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0) - if syserr != 0 { - evFile.Close() - return nil, syserr - } - - eventfd := os.NewFile(fd, "eventfd") - - eventControlPath := filepath.Join(cgDir, "cgroup.event_control") - data := fmt.Sprintf("%d %d %s", eventfd.Fd(), evFile.Fd(), arg) - if err := ioutil.WriteFile(eventControlPath, []byte(data), 0700); err != nil { - eventfd.Close() - evFile.Close() - return nil, err - } - ch := make(chan struct{}) - go func() { - defer func() { - close(ch) - eventfd.Close() - evFile.Close() - }() - buf := make([]byte, 8) - for { - if _, err := eventfd.Read(buf); err != nil { - return - } - // When a cgroup is destroyed, an event is sent to eventfd. - // So if the control path is gone, return instead of notifying. - if _, err := os.Lstat(eventControlPath); os.IsNotExist(err) { - return - } - ch <- struct{}{} - } - }() - return ch, nil -} - -// notifyOnOOM returns channel on which you can expect event about OOM, -// if process died without OOM this channel will be closed. 
-func notifyOnOOM(paths map[string]string) (<-chan struct{}, error) { - dir := paths[oomCgroupName] - if dir == "" { - return nil, fmt.Errorf("path %q missing", oomCgroupName) - } - - return registerMemoryEvent(dir, "memory.oom_control", "") -} - -func notifyMemoryPressure(paths map[string]string, level PressureLevel) (<-chan struct{}, error) { - dir := paths[oomCgroupName] - if dir == "" { - return nil, fmt.Errorf("path %q missing", oomCgroupName) - } - - if level > CriticalPressure { - return nil, fmt.Errorf("invalid pressure level %d", level) - } - - levelStr := []string{"low", "medium", "critical"}[level] - return registerMemoryEvent(dir, "memory.pressure_level", levelStr) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/notify_linux_test.go b/vendor/github.com/opencontainers/runc/libcontainer/notify_linux_test.go deleted file mode 100644 index 9aa4f3b3..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/notify_linux_test.go +++ /dev/null @@ -1,128 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "encoding/binary" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "syscall" - "testing" - "time" -) - -type notifyFunc func(paths map[string]string) (<-chan struct{}, error) - -func testMemoryNotification(t *testing.T, evName string, notify notifyFunc, targ string) { - memoryPath, err := ioutil.TempDir("", "testmemnotification-"+evName) - if err != nil { - t.Fatal(err) - } - evFile := filepath.Join(memoryPath, evName) - eventPath := filepath.Join(memoryPath, "cgroup.event_control") - if err := ioutil.WriteFile(evFile, []byte{}, 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(eventPath, []byte{}, 0700); err != nil { - t.Fatal(err) - } - paths := map[string]string{ - "memory": memoryPath, - } - ch, err := notify(paths) - if err != nil { - t.Fatal("expected no error, got:", err) - } - - data, err := ioutil.ReadFile(eventPath) - if err != nil { - t.Fatal("couldn't read event control file:", err) - } - - var eventFd, evFd int - var arg string - if targ != "" { - _, err = fmt.Sscanf(string(data), "%d %d %s", &eventFd, &evFd, &arg) - } else { - _, err = fmt.Sscanf(string(data), "%d %d", &eventFd, &evFd) - } - if err != nil || arg != targ { - t.Fatalf("invalid control data %q: %s", data, err) - } - - // re-open the eventfd - efd, err := syscall.Dup(eventFd) - if err != nil { - t.Fatal("unable to reopen eventfd:", err) - } - defer syscall.Close(efd) - - if err != nil { - t.Fatal("unable to dup event fd:", err) - } - - buf := make([]byte, 8) - binary.LittleEndian.PutUint64(buf, 1) - - if _, err := syscall.Write(efd, buf); err != nil { - t.Fatal("unable to write to eventfd:", err) - } - - select { - case <-ch: - case <-time.After(100 * time.Millisecond): - t.Fatal("no notification on channel after 100ms") - } - - // simulate what happens when a cgroup is destroyed by cleaning up and then - // writing to the eventfd. 
- if err := os.RemoveAll(memoryPath); err != nil { - t.Fatal(err) - } - if _, err := syscall.Write(efd, buf); err != nil { - t.Fatal("unable to write to eventfd:", err) - } - - // give things a moment to shut down - select { - case _, ok := <-ch: - if ok { - t.Fatal("expected no notification to be triggered") - } - case <-time.After(100 * time.Millisecond): - } - - if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(evFd), syscall.F_GETFD, 0); err != syscall.EBADF { - t.Error("expected event control to be closed") - } - - if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(eventFd), syscall.F_GETFD, 0); err != syscall.EBADF { - t.Error("expected event fd to be closed") - } -} - -func TestNotifyOnOOM(t *testing.T) { - f := func(paths map[string]string) (<-chan struct{}, error) { - return notifyOnOOM(paths) - } - - testMemoryNotification(t, "memory.oom_control", f, "") -} - -func TestNotifyMemoryPressure(t *testing.T) { - tests := map[PressureLevel]string{ - LowPressure: "low", - MediumPressure: "medium", - CriticalPressure: "critical", - } - - for level, arg := range tests { - f := func(paths map[string]string) (<-chan struct{}, error) { - return notifyMemoryPressure(paths, level) - } - - testMemoryNotification(t, "memory.pressure_level", f, arg) - } -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/process.go b/vendor/github.com/opencontainers/runc/libcontainer/process.go deleted file mode 100644 index 91e8ef56..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/process.go +++ /dev/null @@ -1,121 +0,0 @@ -package libcontainer - -import ( - "fmt" - "io" - "math" - "os" - - "github.com/opencontainers/runc/libcontainer/configs" -) - -type processOperations interface { - wait() (*os.ProcessState, error) - signal(sig os.Signal) error - pid() int -} - -// Process specifies the configuration and IO for a process inside -// a container. -type Process struct { - // The command to be run followed by any arguments. - Args []string - - // Env specifies the environment variables for the process. - Env []string - - // User will set the uid and gid of the executing process running inside the container - // local to the container's user and group configuration. - User string - - // Cwd will change the processes current working directory inside the container's rootfs. - Cwd string - - // Stdin is a pointer to a reader which provides the standard input stream. - Stdin io.Reader - - // Stdout is a pointer to a writer which receives the standard output stream. - Stdout io.Writer - - // Stderr is a pointer to a writer which receives the standard error stream. - Stderr io.Writer - - // ExtraFiles specifies additional open files to be inherited by the container - ExtraFiles []*os.File - - // consolePath is the path to the console allocated to the container. - consolePath string - - // Capabilities specify the capabilities to keep when executing the process inside the container - // All capabilities not specified will be dropped from the processes capability mask - Capabilities []string - - // AppArmorProfile specifies the profile to apply to the process and is - // changed at the time the process is execed - AppArmorProfile string - - // Label specifies the label to apply to the process. It is commonly used by selinux - Label string - - // NoNewPrivileges controls whether processes can gain additional privileges. 
- NoNewPrivileges *bool - - // Rlimits specifies the resource limits, such as max open files, to set in the container - // If Rlimits are not set, the container will inherit rlimits from the parent process - Rlimits []configs.Rlimit - - ops processOperations -} - -// Wait waits for the process to exit. -// Wait releases any resources associated with the Process -func (p Process) Wait() (*os.ProcessState, error) { - if p.ops == nil { - return nil, newGenericError(fmt.Errorf("invalid process"), NoProcessOps) - } - return p.ops.wait() -} - -// Pid returns the process ID -func (p Process) Pid() (int, error) { - // math.MinInt32 is returned here, because it's invalid value - // for the kill() system call. - if p.ops == nil { - return math.MinInt32, newGenericError(fmt.Errorf("invalid process"), NoProcessOps) - } - return p.ops.pid(), nil -} - -// Signal sends a signal to the Process. -func (p Process) Signal(sig os.Signal) error { - if p.ops == nil { - return newGenericError(fmt.Errorf("invalid process"), NoProcessOps) - } - return p.ops.signal(sig) -} - -// IO holds the process's STDIO -type IO struct { - Stdin io.WriteCloser - Stdout io.ReadCloser - Stderr io.ReadCloser -} - -// NewConsole creates new console for process and returns it -func (p *Process) NewConsole(rootuid int) (Console, error) { - console, err := NewConsole(rootuid, rootuid) - if err != nil { - return nil, err - } - p.consolePath = console.Path() - return console, nil -} - -// ConsoleFromPath sets the process's console with the path provided -func (p *Process) ConsoleFromPath(path string) error { - if p.consolePath != "" { - return newGenericError(fmt.Errorf("console path already exists for process"), ConsoleExists) - } - p.consolePath = path - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go deleted file mode 100644 index 1a2ee0bc..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go +++ /dev/null @@ -1,487 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "strconv" - "syscall" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/system" - "github.com/opencontainers/runc/libcontainer/utils" -) - -type parentProcess interface { - // pid returns the pid for the running process. - pid() int - - // start starts the process execution. - start() error - - // send a SIGKILL to the process and wait for the exit. - terminate() error - - // wait waits on the process returning the process state. - wait() (*os.ProcessState, error) - - // startTime return's the process start time. 
- startTime() (string, error) - - signal(os.Signal) error - - externalDescriptors() []string - - setExternalDescriptors(fds []string) -} - -type setnsProcess struct { - cmd *exec.Cmd - parentPipe *os.File - childPipe *os.File - cgroupPaths map[string]string - config *initConfig - fds []string - process *Process - bootstrapData io.Reader -} - -func (p *setnsProcess) startTime() (string, error) { - return system.GetProcessStartTime(p.pid()) -} - -func (p *setnsProcess) signal(sig os.Signal) error { - s, ok := sig.(syscall.Signal) - if !ok { - return errors.New("os: unsupported signal type") - } - return syscall.Kill(p.pid(), s) -} - -func (p *setnsProcess) start() (err error) { - defer p.parentPipe.Close() - err = p.cmd.Start() - p.childPipe.Close() - if err != nil { - return newSystemError(err) - } - if p.bootstrapData != nil { - if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil { - return newSystemError(err) - } - } - if err = p.execSetns(); err != nil { - return newSystemError(err) - } - if len(p.cgroupPaths) > 0 { - if err := cgroups.EnterPid(p.cgroupPaths, p.pid()); err != nil { - return newSystemError(err) - } - } - // set oom_score_adj - if err := setOomScoreAdj(p.config.Config.OomScoreAdj, p.pid()); err != nil { - return newSystemError(err) - } - // set rlimits, this has to be done here because we lose permissions - // to raise the limits once we enter a user-namespace - if err := setupRlimits(p.config.Rlimits, p.pid()); err != nil { - return newSystemError(err) - } - if err := utils.WriteJSON(p.parentPipe, p.config); err != nil { - return newSystemError(err) - } - - if err := syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR); err != nil { - return newSystemError(err) - } - // wait for the child process to fully complete and receive an error message - // if one was encoutered - var ierr *genericError - if err := json.NewDecoder(p.parentPipe).Decode(&ierr); err != nil && err != io.EOF { - return newSystemError(err) - } - // Must be done after Shutdown so the child will exit and we can wait for it. - if ierr != nil { - p.wait() - return newSystemError(ierr) - } - return nil -} - -// execSetns runs the process that executes C code to perform the setns calls -// because setns support requires the C process to fork off a child and perform the setns -// before the go runtime boots, we wait on the process to die and receive the child's pid -// over the provided pipe. -func (p *setnsProcess) execSetns() error { - status, err := p.cmd.Process.Wait() - if err != nil { - p.cmd.Wait() - return newSystemError(err) - } - if !status.Success() { - p.cmd.Wait() - return newSystemError(&exec.ExitError{ProcessState: status}) - } - var pid *pid - if err := json.NewDecoder(p.parentPipe).Decode(&pid); err != nil { - p.cmd.Wait() - return newSystemError(err) - } - process, err := os.FindProcess(pid.Pid) - if err != nil { - return err - } - p.cmd.Process = process - p.process.ops = p - return nil -} - -// terminate sends a SIGKILL to the forked process for the setns routine then waits to -// avoid the process becomming a zombie. 
-func (p *setnsProcess) terminate() error { - if p.cmd.Process == nil { - return nil - } - err := p.cmd.Process.Kill() - if _, werr := p.wait(); err == nil { - err = werr - } - return err -} - -func (p *setnsProcess) wait() (*os.ProcessState, error) { - err := p.cmd.Wait() - - // Return actual ProcessState even on Wait error - return p.cmd.ProcessState, err -} - -func (p *setnsProcess) pid() int { - return p.cmd.Process.Pid -} - -func (p *setnsProcess) externalDescriptors() []string { - return p.fds -} - -func (p *setnsProcess) setExternalDescriptors(newFds []string) { - p.fds = newFds -} - -type initProcess struct { - cmd *exec.Cmd - parentPipe *os.File - childPipe *os.File - config *initConfig - manager cgroups.Manager - container *linuxContainer - fds []string - process *Process - bootstrapData io.Reader - sharePidns bool -} - -func (p *initProcess) pid() int { - return p.cmd.Process.Pid -} - -func (p *initProcess) externalDescriptors() []string { - return p.fds -} - -// execSetns runs the process that executes C code to perform the setns calls -// because setns support requires the C process to fork off a child and perform the setns -// before the go runtime boots, we wait on the process to die and receive the child's pid -// over the provided pipe. -// This is called by initProcess.start function -func (p *initProcess) execSetns() error { - status, err := p.cmd.Process.Wait() - if err != nil { - p.cmd.Wait() - return err - } - if !status.Success() { - p.cmd.Wait() - return &exec.ExitError{ProcessState: status} - } - var pid *pid - if err := json.NewDecoder(p.parentPipe).Decode(&pid); err != nil { - p.cmd.Wait() - return err - } - process, err := os.FindProcess(pid.Pid) - if err != nil { - return err - } - p.cmd.Process = process - return nil -} - -func (p *initProcess) start() error { - defer p.parentPipe.Close() - err := p.cmd.Start() - p.process.ops = p - p.childPipe.Close() - if err != nil { - p.process.ops = nil - return newSystemError(err) - } - if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil { - return err - } - if err := p.execSetns(); err != nil { - return newSystemError(err) - } - // Save the standard descriptor names before the container process - // can potentially move them (e.g., via dup2()). If we don't do this now, - // we won't know at checkpoint time which file descriptor to look up. 
- fds, err := getPipeFds(p.pid()) - if err != nil { - return newSystemError(err) - } - p.setExternalDescriptors(fds) - // Do this before syncing with child so that no children - // can escape the cgroup - if err := p.manager.Apply(p.pid()); err != nil { - return newSystemError(err) - } - defer func() { - if err != nil { - // TODO: should not be the responsibility to call here - p.manager.Destroy() - } - }() - if err := p.createNetworkInterfaces(); err != nil { - return newSystemError(err) - } - if err := p.sendConfig(); err != nil { - return newSystemError(err) - } - var ( - procSync syncT - sentRun bool - sentResume bool - ierr *genericError - ) - - dec := json.NewDecoder(p.parentPipe) -loop: - for { - if err := dec.Decode(&procSync); err != nil { - if err == io.EOF { - break loop - } - return newSystemError(err) - } - switch procSync.Type { - case procReady: - if err := p.manager.Set(p.config.Config); err != nil { - return newSystemError(err) - } - // set oom_score_adj - if err := setOomScoreAdj(p.config.Config.OomScoreAdj, p.pid()); err != nil { - return newSystemError(err) - } - // set rlimits, this has to be done here because we lose permissions - // to raise the limits once we enter a user-namespace - if err := setupRlimits(p.config.Rlimits, p.pid()); err != nil { - return newSystemError(err) - } - // call prestart hooks - if !p.config.Config.Namespaces.Contains(configs.NEWNS) { - if p.config.Config.Hooks != nil { - s := configs.HookState{ - Version: p.container.config.Version, - ID: p.container.id, - Pid: p.pid(), - Root: p.config.Config.Rootfs, - } - for _, hook := range p.config.Config.Hooks.Prestart { - if err := hook.Run(s); err != nil { - return newSystemError(err) - } - } - } - } - // Sync with child. - if err := utils.WriteJSON(p.parentPipe, syncT{procRun}); err != nil { - return newSystemError(err) - } - sentRun = true - case procHooks: - if p.config.Config.Hooks != nil { - s := configs.HookState{ - Version: p.container.config.Version, - ID: p.container.id, - Pid: p.pid(), - Root: p.config.Config.Rootfs, - BundlePath: utils.SearchLabels(p.config.Config.Labels, "bundle"), - } - for _, hook := range p.config.Config.Hooks.Prestart { - if err := hook.Run(s); err != nil { - return newSystemError(err) - } - } - } - // Sync with child. - if err := utils.WriteJSON(p.parentPipe, syncT{procResume}); err != nil { - return newSystemError(err) - } - sentResume = true - case procError: - // wait for the child process to fully complete and receive an error message - // if one was encoutered - if err := dec.Decode(&ierr); err != nil && err != io.EOF { - return newSystemError(err) - } - if ierr != nil { - break loop - } - // Programmer error. - panic("No error following JSON procError payload.") - default: - return newSystemError(fmt.Errorf("invalid JSON synchronisation payload from child")) - } - } - if !sentRun { - return newSystemError(fmt.Errorf("could not synchronise with container process: %v", ierr)) - } - if p.config.Config.Namespaces.Contains(configs.NEWNS) && !sentResume { - return newSystemError(fmt.Errorf("could not synchronise after executing prestart hooks with container process")) - } - if err := syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR); err != nil { - return newSystemError(err) - } - // Must be done after Shutdown so the child will exit and we can wait for it. 
- if ierr != nil { - p.wait() - return newSystemError(ierr) - } - return nil -} - -func (p *initProcess) wait() (*os.ProcessState, error) { - err := p.cmd.Wait() - if err != nil { - return p.cmd.ProcessState, err - } - // we should kill all processes in cgroup when init is died if we use host PID namespace - if p.sharePidns { - killCgroupProcesses(p.manager) - } - return p.cmd.ProcessState, nil -} - -func (p *initProcess) terminate() error { - if p.cmd.Process == nil { - return nil - } - err := p.cmd.Process.Kill() - if _, werr := p.wait(); err == nil { - err = werr - } - return err -} - -func (p *initProcess) startTime() (string, error) { - return system.GetProcessStartTime(p.pid()) -} - -func (p *initProcess) sendConfig() error { - // send the config to the container's init process, we don't use JSON Encode - // here because there might be a problem in JSON decoder in some cases, see: - // https://github.com/docker/docker/issues/14203#issuecomment-174177790 - return utils.WriteJSON(p.parentPipe, p.config) -} - -func (p *initProcess) createNetworkInterfaces() error { - for _, config := range p.config.Config.Networks { - strategy, err := getStrategy(config.Type) - if err != nil { - return err - } - n := &network{ - Network: *config, - } - if err := strategy.create(n, p.pid()); err != nil { - return err - } - p.config.Networks = append(p.config.Networks, n) - } - return nil -} - -func (p *initProcess) signal(sig os.Signal) error { - s, ok := sig.(syscall.Signal) - if !ok { - return errors.New("os: unsupported signal type") - } - return syscall.Kill(p.pid(), s) -} - -func (p *initProcess) setExternalDescriptors(newFds []string) { - p.fds = newFds -} - -func getPipeFds(pid int) ([]string, error) { - fds := make([]string, 3) - - dirPath := filepath.Join("/proc", strconv.Itoa(pid), "/fd") - for i := 0; i < 3; i++ { - f := filepath.Join(dirPath, strconv.Itoa(i)) - target, err := os.Readlink(f) - if err != nil { - return fds, err - } - fds[i] = target - } - return fds, nil -} - -// InitializeIO creates pipes for use with the process's STDIO -// and returns the opposite side for each -func (p *Process) InitializeIO(rootuid int) (i *IO, err error) { - var fds []uintptr - i = &IO{} - // cleanup in case of an error - defer func() { - if err != nil { - for _, fd := range fds { - syscall.Close(int(fd)) - } - } - }() - // STDIN - r, w, err := os.Pipe() - if err != nil { - return nil, err - } - fds = append(fds, r.Fd(), w.Fd()) - p.Stdin, i.Stdin = r, w - // STDOUT - if r, w, err = os.Pipe(); err != nil { - return nil, err - } - fds = append(fds, r.Fd(), w.Fd()) - p.Stdout, i.Stdout = w, r - // STDERR - if r, w, err = os.Pipe(); err != nil { - return nil, err - } - fds = append(fds, r.Fd(), w.Fd()) - p.Stderr, i.Stderr = w, r - // change ownership of the pipes incase we are in a user namespace - for _, fd := range fds { - if err := syscall.Fchown(int(fd), rootuid, rootuid); err != nil { - return nil, err - } - } - return i, nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/restored_process.go b/vendor/github.com/opencontainers/runc/libcontainer/restored_process.go deleted file mode 100644 index a96f4ca5..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/restored_process.go +++ /dev/null @@ -1,122 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "os" - - "github.com/opencontainers/runc/libcontainer/system" -) - -func newRestoredProcess(pid int, fds []string) (*restoredProcess, error) { - var ( - err error - ) - proc, err := os.FindProcess(pid) - 
if err != nil { - return nil, err - } - started, err := system.GetProcessStartTime(pid) - if err != nil { - return nil, err - } - return &restoredProcess{ - proc: proc, - processStartTime: started, - fds: fds, - }, nil -} - -type restoredProcess struct { - proc *os.Process - processStartTime string - fds []string -} - -func (p *restoredProcess) start() error { - return newGenericError(fmt.Errorf("restored process cannot be started"), SystemError) -} - -func (p *restoredProcess) pid() int { - return p.proc.Pid -} - -func (p *restoredProcess) terminate() error { - err := p.proc.Kill() - if _, werr := p.wait(); err == nil { - err = werr - } - return err -} - -func (p *restoredProcess) wait() (*os.ProcessState, error) { - // TODO: how do we wait on the actual process? - // maybe use --exec-cmd in criu - st, err := p.proc.Wait() - if err != nil { - return nil, err - } - return st, nil -} - -func (p *restoredProcess) startTime() (string, error) { - return p.processStartTime, nil -} - -func (p *restoredProcess) signal(s os.Signal) error { - return p.proc.Signal(s) -} - -func (p *restoredProcess) externalDescriptors() []string { - return p.fds -} - -func (p *restoredProcess) setExternalDescriptors(newFds []string) { - p.fds = newFds -} - -// nonChildProcess represents a process where the calling process is not -// the parent process. This process is created when a factory loads a container from -// a persisted state. -type nonChildProcess struct { - processPid int - processStartTime string - fds []string -} - -func (p *nonChildProcess) start() error { - return newGenericError(fmt.Errorf("restored process cannot be started"), SystemError) -} - -func (p *nonChildProcess) pid() int { - return p.processPid -} - -func (p *nonChildProcess) terminate() error { - return newGenericError(fmt.Errorf("restored process cannot be terminated"), SystemError) -} - -func (p *nonChildProcess) wait() (*os.ProcessState, error) { - return nil, newGenericError(fmt.Errorf("restored process cannot be waited on"), SystemError) -} - -func (p *nonChildProcess) startTime() (string, error) { - return p.processStartTime, nil -} - -func (p *nonChildProcess) signal(s os.Signal) error { - proc, err := os.FindProcess(p.processPid) - if err != nil { - return err - } - return proc.Signal(s) -} - -func (p *nonChildProcess) externalDescriptors() []string { - return p.fds -} - -func (p *nonChildProcess) setExternalDescriptors(newFds []string) { - p.fds = newFds -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go deleted file mode 100644 index 4aa4cbd5..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go +++ /dev/null @@ -1,725 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "strings" - "syscall" - "time" - - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/symlink" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/label" - "github.com/opencontainers/runc/libcontainer/system" - libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils" -) - -const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV - -// setupDev returns true if /dev needs to be set up. 
-func needsSetupDev(config *configs.Config) bool { - for _, m := range config.Mounts { - if m.Device == "bind" && (m.Destination == "/dev" || m.Destination == "/dev/") { - return false - } - } - return true -} - -// setupRootfs sets up the devices, mount points, and filesystems for use inside a -// new mount namespace. -func setupRootfs(config *configs.Config, console *linuxConsole, pipe io.ReadWriter) (err error) { - if err := prepareRoot(config); err != nil { - return newSystemError(err) - } - - setupDev := needsSetupDev(config) - for _, m := range config.Mounts { - for _, precmd := range m.PremountCmds { - if err := mountCmd(precmd); err != nil { - return newSystemError(err) - } - } - if err := mountToRootfs(m, config.Rootfs, config.MountLabel); err != nil { - return newSystemError(err) - } - - for _, postcmd := range m.PostmountCmds { - if err := mountCmd(postcmd); err != nil { - return newSystemError(err) - } - } - } - if setupDev { - if err := createDevices(config); err != nil { - return newSystemError(err) - } - if err := setupPtmx(config, console); err != nil { - return newSystemError(err) - } - if err := setupDevSymlinks(config.Rootfs); err != nil { - return newSystemError(err) - } - } - // Signal the parent to run the pre-start hooks. - // The hooks are run after the mounts are setup, but before we switch to the new - // root, so that the old root is still available in the hooks for any mount - // manipulations. - if err := syncParentHooks(pipe); err != nil { - return err - } - if err := syscall.Chdir(config.Rootfs); err != nil { - return newSystemError(err) - } - if config.NoPivotRoot { - err = msMoveRoot(config.Rootfs) - } else { - err = pivotRoot(config.Rootfs, config.PivotDir) - } - if err != nil { - return newSystemError(err) - } - if setupDev { - if err := reOpenDevNull(); err != nil { - return newSystemError(err) - } - } - // remount dev as ro if specifed - for _, m := range config.Mounts { - if m.Destination == "/dev" { - if m.Flags&syscall.MS_RDONLY != 0 { - if err := remountReadonly(m.Destination); err != nil { - return newSystemError(err) - } - } - break - } - } - // set rootfs ( / ) as readonly - if config.Readonlyfs { - if err := setReadonly(); err != nil { - return newSystemError(err) - } - } - syscall.Umask(0022) - return nil -} - -func mountCmd(cmd configs.Command) error { - - command := exec.Command(cmd.Path, cmd.Args[:]...) 
- command.Env = cmd.Env - command.Dir = cmd.Dir - if out, err := command.CombinedOutput(); err != nil { - return fmt.Errorf("%#v failed: %s: %v", cmd, string(out), err) - } - - return nil -} - -func mountToRootfs(m *configs.Mount, rootfs, mountLabel string) error { - var ( - dest = m.Destination - ) - if !strings.HasPrefix(dest, rootfs) { - dest = filepath.Join(rootfs, dest) - } - - switch m.Device { - case "proc", "sysfs": - if err := os.MkdirAll(dest, 0755); err != nil { - return err - } - // Selinux kernels do not support labeling of /proc or /sys - return mountPropagate(m, rootfs, "") - case "mqueue": - if err := os.MkdirAll(dest, 0755); err != nil { - return err - } - if err := mountPropagate(m, rootfs, mountLabel); err != nil { - // older kernels do not support labeling of /dev/mqueue - if err := mountPropagate(m, rootfs, ""); err != nil { - return err - } - return label.SetFileLabel(dest, mountLabel) - } - return nil - case "tmpfs": - stat, err := os.Stat(dest) - if err != nil { - if err := os.MkdirAll(dest, 0755); err != nil { - return err - } - } - if err := mountPropagate(m, rootfs, mountLabel); err != nil { - return err - } - if stat != nil { - if err = os.Chmod(dest, stat.Mode()); err != nil { - return err - } - } - return nil - case "bind": - stat, err := os.Stat(m.Source) - if err != nil { - // error out if the source of a bind mount does not exist as we will be - // unable to bind anything to it. - return err - } - // ensure that the destination of the bind mount is resolved of symlinks at mount time because - // any previous mounts can invalidate the next mount's destination. - // this can happen when a user specifies mounts within other mounts to cause breakouts or other - // evil stuff to try to escape the container's rootfs. - if dest, err = symlink.FollowSymlinkInScope(filepath.Join(rootfs, m.Destination), rootfs); err != nil { - return err - } - if err := checkMountDestination(rootfs, dest); err != nil { - return err - } - // update the mount with the correct dest after symlinks are resolved. - m.Destination = dest - if err := createIfNotExists(dest, stat.IsDir()); err != nil { - return err - } - if err := mountPropagate(m, rootfs, mountLabel); err != nil { - return err - } - // bind mount won't change mount options, we need remount to make mount options effective. 
- // first check that we have non-default options required before attempting a remount - if m.Flags&^(syscall.MS_REC|syscall.MS_REMOUNT|syscall.MS_BIND) != 0 { - // only remount if unique mount options are set - if err := remount(m, rootfs); err != nil { - return err - } - } - - if m.Relabel != "" { - if err := label.Validate(m.Relabel); err != nil { - return err - } - shared := label.IsShared(m.Relabel) - if err := label.Relabel(m.Source, mountLabel, shared); err != nil { - return err - } - } - case "cgroup": - binds, err := getCgroupMounts(m) - if err != nil { - return err - } - var merged []string - for _, b := range binds { - ss := filepath.Base(b.Destination) - if strings.Contains(ss, ",") { - merged = append(merged, ss) - } - } - tmpfs := &configs.Mount{ - Source: "tmpfs", - Device: "tmpfs", - Destination: m.Destination, - Flags: defaultMountFlags, - Data: "mode=755", - PropagationFlags: m.PropagationFlags, - } - if err := mountToRootfs(tmpfs, rootfs, mountLabel); err != nil { - return err - } - for _, b := range binds { - if err := mountToRootfs(b, rootfs, mountLabel); err != nil { - return err - } - } - // create symlinks for merged cgroups - cwd, err := os.Getwd() - if err != nil { - return err - } - if err := os.Chdir(filepath.Join(rootfs, m.Destination)); err != nil { - return err - } - for _, mc := range merged { - for _, ss := range strings.Split(mc, ",") { - if err := os.Symlink(mc, ss); err != nil { - // if cgroup already exists, then okay(it could have been created before) - if os.IsExist(err) { - continue - } - os.Chdir(cwd) - return err - } - } - } - if err := os.Chdir(cwd); err != nil { - return err - } - if m.Flags&syscall.MS_RDONLY != 0 { - // remount cgroup root as readonly - mcgrouproot := &configs.Mount{ - Destination: m.Destination, - Flags: defaultMountFlags | syscall.MS_RDONLY, - } - if err := remount(mcgrouproot, rootfs); err != nil { - return err - } - } - default: - if err := os.MkdirAll(dest, 0755); err != nil { - return err - } - return mountPropagate(m, rootfs, mountLabel) - } - return nil -} - -func getCgroupMounts(m *configs.Mount) ([]*configs.Mount, error) { - mounts, err := cgroups.GetCgroupMounts() - if err != nil { - return nil, err - } - - cgroupPaths, err := cgroups.ParseCgroupFile("/proc/self/cgroup") - if err != nil { - return nil, err - } - - var binds []*configs.Mount - - for _, mm := range mounts { - dir, err := mm.GetThisCgroupDir(cgroupPaths) - if err != nil { - return nil, err - } - relDir, err := filepath.Rel(mm.Root, dir) - if err != nil { - return nil, err - } - binds = append(binds, &configs.Mount{ - Device: "bind", - Source: filepath.Join(mm.Mountpoint, relDir), - Destination: filepath.Join(m.Destination, strings.Join(mm.Subsystems, ",")), - Flags: syscall.MS_BIND | syscall.MS_REC | m.Flags, - PropagationFlags: m.PropagationFlags, - }) - } - - return binds, nil -} - -// checkMountDestination checks to ensure that the mount destination is not over the top of /proc. -// dest is required to be an abs path and have any symlinks resolved before calling this function. -func checkMountDestination(rootfs, dest string) error { - if libcontainerUtils.CleanPath(rootfs) == libcontainerUtils.CleanPath(dest) { - return fmt.Errorf("mounting into / is prohibited") - } - invalidDestinations := []string{ - "/proc", - } - // White list, it should be sub directories of invalid destinations - validDestinations := []string{ - // These entries can be bind mounted by files emulated by fuse, - // so commands like top, free displays stats in container. 
- "/proc/cpuinfo", - "/proc/diskstats", - "/proc/meminfo", - "/proc/stat", - "/proc/net/dev", - } - for _, valid := range validDestinations { - path, err := filepath.Rel(filepath.Join(rootfs, valid), dest) - if err != nil { - return err - } - if path == "." { - return nil - } - } - for _, invalid := range invalidDestinations { - path, err := filepath.Rel(filepath.Join(rootfs, invalid), dest) - if err != nil { - return err - } - if path == "." || !strings.HasPrefix(path, "..") { - return fmt.Errorf("%q cannot be mounted because it is located inside %q", dest, invalid) - } - } - return nil -} - -func setupDevSymlinks(rootfs string) error { - var links = [][2]string{ - {"/proc/self/fd", "/dev/fd"}, - {"/proc/self/fd/0", "/dev/stdin"}, - {"/proc/self/fd/1", "/dev/stdout"}, - {"/proc/self/fd/2", "/dev/stderr"}, - } - // kcore support can be toggled with CONFIG_PROC_KCORE; only create a symlink - // in /dev if it exists in /proc. - if _, err := os.Stat("/proc/kcore"); err == nil { - links = append(links, [2]string{"/proc/kcore", "/dev/core"}) - } - for _, link := range links { - var ( - src = link[0] - dst = filepath.Join(rootfs, link[1]) - ) - if err := os.Symlink(src, dst); err != nil && !os.IsExist(err) { - return fmt.Errorf("symlink %s %s %s", src, dst, err) - } - } - return nil -} - -// If stdin, stdout, and/or stderr are pointing to `/dev/null` in the parent's rootfs -// this method will make them point to `/dev/null` in this container's rootfs. This -// needs to be called after we chroot/pivot into the container's rootfs so that any -// symlinks are resolved locally. -func reOpenDevNull() error { - var stat, devNullStat syscall.Stat_t - file, err := os.OpenFile("/dev/null", os.O_RDWR, 0) - if err != nil { - return fmt.Errorf("Failed to open /dev/null - %s", err) - } - defer file.Close() - if err := syscall.Fstat(int(file.Fd()), &devNullStat); err != nil { - return err - } - for fd := 0; fd < 3; fd++ { - if err := syscall.Fstat(fd, &stat); err != nil { - return err - } - if stat.Rdev == devNullStat.Rdev { - // Close and re-open the fd. - if err := syscall.Dup3(int(file.Fd()), fd, 0); err != nil { - return err - } - } - } - return nil -} - -// Create the device nodes in the container. -func createDevices(config *configs.Config) error { - useBindMount := system.RunningInUserNS() || config.Namespaces.Contains(configs.NEWUSER) - oldMask := syscall.Umask(0000) - for _, node := range config.Devices { - // containers running in a user namespace are not allowed to mknod - // devices so we can just bind mount it from the host. - if err := createDeviceNode(config.Rootfs, node, useBindMount); err != nil { - syscall.Umask(oldMask) - return err - } - } - syscall.Umask(oldMask) - return nil -} - -func bindMountDeviceNode(dest string, node *configs.Device) error { - f, err := os.Create(dest) - if err != nil && !os.IsExist(err) { - return err - } - if f != nil { - f.Close() - } - return syscall.Mount(node.Path, dest, "bind", syscall.MS_BIND, "") -} - -// Creates the device node in the rootfs of the container. 
-func createDeviceNode(rootfs string, node *configs.Device, bind bool) error { - dest := filepath.Join(rootfs, node.Path) - if err := os.MkdirAll(filepath.Dir(dest), 0755); err != nil { - return err - } - - if bind { - return bindMountDeviceNode(dest, node) - } - if err := mknodDevice(dest, node); err != nil { - if os.IsExist(err) { - return nil - } else if os.IsPermission(err) { - return bindMountDeviceNode(dest, node) - } - return err - } - return nil -} - -func mknodDevice(dest string, node *configs.Device) error { - fileMode := node.FileMode - switch node.Type { - case 'c': - fileMode |= syscall.S_IFCHR - case 'b': - fileMode |= syscall.S_IFBLK - default: - return fmt.Errorf("%c is not a valid device type for device %s", node.Type, node.Path) - } - if err := syscall.Mknod(dest, uint32(fileMode), node.Mkdev()); err != nil { - return err - } - return syscall.Chown(dest, int(node.Uid), int(node.Gid)) -} - -func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info { - for _, m := range mountinfo { - if m.Mountpoint == dir { - return m - } - } - return nil -} - -// Get the parent mount point of directory passed in as argument. Also return -// optional fields. -func getParentMount(rootfs string) (string, string, error) { - var path string - - mountinfos, err := mount.GetMounts() - if err != nil { - return "", "", err - } - - mountinfo := getMountInfo(mountinfos, rootfs) - if mountinfo != nil { - return rootfs, mountinfo.Optional, nil - } - - path = rootfs - for { - path = filepath.Dir(path) - - mountinfo = getMountInfo(mountinfos, path) - if mountinfo != nil { - return path, mountinfo.Optional, nil - } - - if path == "/" { - break - } - } - - // If we are here, we did not find parent mount. Something is wrong. - return "", "", fmt.Errorf("Could not find parent mount of %s", rootfs) -} - -// Make parent mount private if it was shared -func rootfsParentMountPrivate(config *configs.Config) error { - sharedMount := false - - parentMount, optionalOpts, err := getParentMount(config.Rootfs) - if err != nil { - return err - } - - optsSplit := strings.Split(optionalOpts, " ") - for _, opt := range optsSplit { - if strings.HasPrefix(opt, "shared:") { - sharedMount = true - break - } - } - - // Make parent mount PRIVATE if it was shared. It is needed for two - // reasons. First of all pivot_root() will fail if parent mount is - // shared. Secondly when we bind mount rootfs it will propagate to - // parent namespace and we don't want that to happen. 
- if sharedMount { - return syscall.Mount("", parentMount, "", syscall.MS_PRIVATE, "") - } - - return nil -} - -func prepareRoot(config *configs.Config) error { - flag := syscall.MS_SLAVE | syscall.MS_REC - if config.RootPropagation != 0 { - flag = config.RootPropagation - } - if err := syscall.Mount("", "/", "", uintptr(flag), ""); err != nil { - return err - } - - if err := rootfsParentMountPrivate(config); err != nil { - return err - } - - return syscall.Mount(config.Rootfs, config.Rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, "") -} - -func setReadonly() error { - return syscall.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, "") -} - -func setupPtmx(config *configs.Config, console *linuxConsole) error { - ptmx := filepath.Join(config.Rootfs, "dev/ptmx") - if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) { - return err - } - if err := os.Symlink("pts/ptmx", ptmx); err != nil { - return fmt.Errorf("symlink dev ptmx %s", err) - } - if console != nil { - return console.mount(config.Rootfs, config.MountLabel) - } - return nil -} - -func pivotRoot(rootfs, pivotBaseDir string) (err error) { - if pivotBaseDir == "" { - pivotBaseDir = "/" - } - tmpDir := filepath.Join(rootfs, pivotBaseDir) - if err := os.MkdirAll(tmpDir, 0755); err != nil { - return fmt.Errorf("can't create tmp dir %s, error %v", tmpDir, err) - } - pivotDir, err := ioutil.TempDir(tmpDir, ".pivot_root") - if err != nil { - return fmt.Errorf("can't create pivot_root dir %s, error %v", pivotDir, err) - } - defer func() { - errVal := os.Remove(pivotDir) - if err == nil { - err = errVal - } - }() - if err := syscall.PivotRoot(rootfs, pivotDir); err != nil { - return fmt.Errorf("pivot_root %s", err) - } - if err := syscall.Chdir("/"); err != nil { - return fmt.Errorf("chdir / %s", err) - } - // path to pivot dir now changed, update - pivotDir = filepath.Join(pivotBaseDir, filepath.Base(pivotDir)) - - // Make pivotDir rprivate to make sure any of the unmounts don't - // propagate to parent. - if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil { - return err - } - - if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { - return fmt.Errorf("unmount pivot_root dir %s", err) - } - return nil -} - -func msMoveRoot(rootfs string) error { - if err := syscall.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil { - return err - } - if err := syscall.Chroot("."); err != nil { - return err - } - return syscall.Chdir("/") -} - -// createIfNotExists creates a file or a directory only if it does not already exist. -func createIfNotExists(path string, isDir bool) error { - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - if isDir { - return os.MkdirAll(path, 0755) - } - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - f, err := os.OpenFile(path, os.O_CREATE, 0755) - if err != nil { - return err - } - f.Close() - } - } - return nil -} - -// remountReadonly will bind over the top of an existing path and ensure that it is read-only. 
-func remountReadonly(path string) error { - for i := 0; i < 5; i++ { - if err := syscall.Mount("", path, "", syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil && !os.IsNotExist(err) { - switch err { - case syscall.EINVAL: - // Probably not a mountpoint, use bind-mount - if err := syscall.Mount(path, path, "", syscall.MS_BIND, ""); err != nil { - return err - } - return syscall.Mount(path, path, "", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC|defaultMountFlags, "") - case syscall.EBUSY: - time.Sleep(100 * time.Millisecond) - continue - default: - return err - } - } - return nil - } - return fmt.Errorf("unable to mount %s as readonly max retries reached", path) -} - -// maskFile bind mounts /dev/null over the top of the specified path inside a container -// to avoid security issues from processes reading information from non-namespace aware mounts ( proc/kcore ). -func maskFile(path string) error { - if err := syscall.Mount("/dev/null", path, "", syscall.MS_BIND, ""); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -// writeSystemProperty writes the value to a path under /proc/sys as determined from the key. -// For e.g. net.ipv4.ip_forward translated to /proc/sys/net/ipv4/ip_forward. -func writeSystemProperty(key, value string) error { - keyPath := strings.Replace(key, ".", "/", -1) - return ioutil.WriteFile(path.Join("/proc/sys", keyPath), []byte(value), 0644) -} - -func remount(m *configs.Mount, rootfs string) error { - var ( - dest = m.Destination - ) - if !strings.HasPrefix(dest, rootfs) { - dest = filepath.Join(rootfs, dest) - } - if err := syscall.Mount(m.Source, dest, m.Device, uintptr(m.Flags|syscall.MS_REMOUNT), ""); err != nil { - return err - } - return nil -} - -// Do the mount operation followed by additional mounts required to take care -// of propagation flags. 
-func mountPropagate(m *configs.Mount, rootfs string, mountLabel string) error { - var ( - dest = m.Destination - data = label.FormatMountLabel(m.Data, mountLabel) - flags = m.Flags - ) - if dest == "/dev" { - flags &= ^syscall.MS_RDONLY - } - if !strings.HasPrefix(dest, rootfs) { - dest = filepath.Join(rootfs, dest) - } - - if err := syscall.Mount(m.Source, dest, m.Device, uintptr(flags), data); err != nil { - return err - } - - for _, pflag := range m.PropagationFlags { - if err := syscall.Mount("", dest, "", uintptr(pflag), ""); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux_test.go b/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux_test.go deleted file mode 100644 index f70e10be..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux_test.go +++ /dev/null @@ -1,101 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "testing" - - "github.com/opencontainers/runc/libcontainer/configs" -) - -func TestCheckMountDestOnProc(t *testing.T) { - dest := "/rootfs/proc/" - err := checkMountDestination("/rootfs", dest) - if err == nil { - t.Fatal("destination inside proc should return an error") - } -} - -func TestCheckMountDestInSys(t *testing.T) { - dest := "/rootfs//sys/fs/cgroup" - err := checkMountDestination("/rootfs", dest) - if err != nil { - t.Fatal("destination inside /sys should not return an error") - } -} - -func TestCheckMountDestFalsePositive(t *testing.T) { - dest := "/rootfs/sysfiles/fs/cgroup" - err := checkMountDestination("/rootfs", dest) - if err != nil { - t.Fatal(err) - } -} - -func TestCheckMountRoot(t *testing.T) { - dest := "/rootfs" - err := checkMountDestination("/rootfs", dest) - if err == nil { - t.Fatal(err) - } -} - -func TestNeedsSetupDev(t *testing.T) { - config := &configs.Config{ - Mounts: []*configs.Mount{ - { - Device: "bind", - Source: "/dev", - Destination: "/dev", - }, - }, - } - if needsSetupDev(config) { - t.Fatal("expected needsSetupDev to be false, got true") - } -} - -func TestNeedsSetupDevStrangeSource(t *testing.T) { - config := &configs.Config{ - Mounts: []*configs.Mount{ - { - Device: "bind", - Source: "/devx", - Destination: "/dev", - }, - }, - } - if needsSetupDev(config) { - t.Fatal("expected needsSetupDev to be false, got true") - } -} - -func TestNeedsSetupDevStrangeDest(t *testing.T) { - config := &configs.Config{ - Mounts: []*configs.Mount{ - { - Device: "bind", - Source: "/dev", - Destination: "/devx", - }, - }, - } - if !needsSetupDev(config) { - t.Fatal("expected needsSetupDev to be true, got false") - } -} - -func TestNeedsSetupDevStrangeSourceDest(t *testing.T) { - config := &configs.Config{ - Mounts: []*configs.Mount{ - { - Device: "bind", - Source: "/devx", - Destination: "/devx", - }, - }, - } - if !needsSetupDev(config) { - t.Fatal("expected needsSetupDev to be true, got false") - } -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go deleted file mode 100644 index c7bdb605..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build linux,go1.5 - -package libcontainer - -import "syscall" - -// Set the GidMappingsEnableSetgroups member to true, so the process's -// setgroups proc entry wont be set to 'deny' if GidMappings are set -func enableSetgroups(sys *syscall.SysProcAttr) { - sys.GidMappingsEnableSetgroups = true -} diff --git 
a/vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go deleted file mode 100644 index b1a198fd..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go +++ /dev/null @@ -1,53 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "os" - - "github.com/opencontainers/runc/libcontainer/apparmor" - "github.com/opencontainers/runc/libcontainer/keys" - "github.com/opencontainers/runc/libcontainer/label" - "github.com/opencontainers/runc/libcontainer/seccomp" - "github.com/opencontainers/runc/libcontainer/system" -) - -// linuxSetnsInit performs the container's initialization for running a new process -// inside an existing container. -type linuxSetnsInit struct { - config *initConfig -} - -func (l *linuxSetnsInit) getSessionRingName() string { - return fmt.Sprintf("_ses.%s", l.config.ContainerId) -} - -func (l *linuxSetnsInit) Init() error { - // do not inherit the parent's session keyring - if _, err := keyctl.JoinSessionKeyring(l.getSessionRingName()); err != nil { - return err - } - if l.config.NoNewPrivileges { - if err := system.Prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil { - return err - } - } - if l.config.Config.Seccomp != nil { - if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil { - return err - } - } - if err := finalizeNamespace(l.config); err != nil { - return err - } - if err := apparmor.ApplyProfile(l.config.AppArmorProfile); err != nil { - return err - } - if l.config.ProcessLabel != "" { - if err := label.SetProcessLabel(l.config.ProcessLabel); err != nil { - return err - } - } - return system.Execv(l.config.Args[0], l.config.Args[0:], os.Environ()) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go deleted file mode 100644 index 59bd3700..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go +++ /dev/null @@ -1,147 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "io" - "os" - "syscall" - - "github.com/opencontainers/runc/libcontainer/apparmor" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/keys" - "github.com/opencontainers/runc/libcontainer/label" - "github.com/opencontainers/runc/libcontainer/seccomp" - "github.com/opencontainers/runc/libcontainer/system" -) - -type linuxStandardInit struct { - pipe io.ReadWriter - parentPid int - config *initConfig -} - -func (l *linuxStandardInit) getSessionRingParams() (string, uint32, uint32) { - var newperms uint32 - - if l.config.Config.Namespaces.Contains(configs.NEWUSER) { - // with user ns we need 'other' search permissions - newperms = 0x8 - } else { - // without user ns we need 'UID' search permissions - newperms = 0x80000 - } - - // create a unique per session container name that we can - // join in setns; however, other containers can also join it - return fmt.Sprintf("_ses.%s", l.config.ContainerId), 0xffffffff, newperms -} - -// PR_SET_NO_NEW_PRIVS isn't exposed in Golang so we define it ourselves copying the value -// the kernel -const PR_SET_NO_NEW_PRIVS = 0x26 - -func (l *linuxStandardInit) Init() error { - ringname, keepperms, newperms := l.getSessionRingParams() - - // do not inherit the parent's session keyring - sessKeyId, err := keyctl.JoinSessionKeyring(ringname) - if err != nil { - return err - } - // make session keyring searcheable - 
if err := keyctl.ModKeyringPerm(sessKeyId, keepperms, newperms); err != nil { - return err - } - - var console *linuxConsole - if l.config.Console != "" { - console = newConsoleFromPath(l.config.Console) - if err := console.dupStdio(); err != nil { - return err - } - } - if console != nil { - if err := system.Setctty(); err != nil { - return err - } - } - if err := setupNetwork(l.config); err != nil { - return err - } - if err := setupRoute(l.config.Config); err != nil { - return err - } - - label.Init() - // InitializeMountNamespace() can be executed only for a new mount namespace - if l.config.Config.Namespaces.Contains(configs.NEWNS) { - if err := setupRootfs(l.config.Config, console, l.pipe); err != nil { - return err - } - } - if hostname := l.config.Config.Hostname; hostname != "" { - if err := syscall.Sethostname([]byte(hostname)); err != nil { - return err - } - } - if err := apparmor.ApplyProfile(l.config.AppArmorProfile); err != nil { - return err - } - if err := label.SetProcessLabel(l.config.ProcessLabel); err != nil { - return err - } - - for key, value := range l.config.Config.Sysctl { - if err := writeSystemProperty(key, value); err != nil { - return err - } - } - for _, path := range l.config.Config.ReadonlyPaths { - if err := remountReadonly(path); err != nil { - return err - } - } - for _, path := range l.config.Config.MaskPaths { - if err := maskFile(path); err != nil { - return err - } - } - pdeath, err := system.GetParentDeathSignal() - if err != nil { - return err - } - if l.config.NoNewPrivileges { - if err := system.Prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil { - return err - } - } - // Tell our parent that we're ready to Execv. This must be done before the - // Seccomp rules have been applied, because we need to be able to read and - // write to a socket. - if err := syncParentReady(l.pipe); err != nil { - return err - } - if l.config.Config.Seccomp != nil { - if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil { - return err - } - } - if err := finalizeNamespace(l.config); err != nil { - return err - } - // finalizeNamespace can change user/group which clears the parent death - // signal, so we restore it here. - if err := pdeath.Restore(); err != nil { - return err - } - // compare the parent from the inital start of the init process and make sure that it did not change. - // if the parent changes that means it died and we were reparened to something else so we should - // just kill ourself and not cause problems for someone else. - if syscall.Getppid() != l.parentPid { - return syscall.Kill(syscall.Getpid(), syscall.SIGKILL) - } - - return system.Execv(l.config.Args[0], l.config.Args[0:], os.Environ()) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/state_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/state_linux.go deleted file mode 100644 index d2618f69..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/state_linux.go +++ /dev/null @@ -1,228 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/Sirupsen/logrus" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/utils" -) - -func newStateTransitionError(from, to containerState) error { - return &stateTransitionError{ - From: from.status().String(), - To: to.status().String(), - } -} - -// stateTransitionError is returned when an invalid state transition happens from one -// state to another. 
-type stateTransitionError struct { - From string - To string -} - -func (s *stateTransitionError) Error() string { - return fmt.Sprintf("invalid state transition from %s to %s", s.From, s.To) -} - -type containerState interface { - transition(containerState) error - destroy() error - status() Status -} - -func destroy(c *linuxContainer) error { - if !c.config.Namespaces.Contains(configs.NEWPID) { - if err := killCgroupProcesses(c.cgroupManager); err != nil { - logrus.Warn(err) - } - } - err := c.cgroupManager.Destroy() - if rerr := os.RemoveAll(c.root); err == nil { - err = rerr - } - c.initProcess = nil - if herr := runPoststopHooks(c); err == nil { - err = herr - } - c.state = &stoppedState{c: c} - return err -} - -func runPoststopHooks(c *linuxContainer) error { - if c.config.Hooks != nil { - s := configs.HookState{ - Version: c.config.Version, - ID: c.id, - Root: c.config.Rootfs, - BundlePath: utils.SearchLabels(c.config.Labels, "bundle"), - } - for _, hook := range c.config.Hooks.Poststop { - if err := hook.Run(s); err != nil { - return err - } - } - } - return nil -} - -// stoppedState represents a container is a stopped/destroyed state. -type stoppedState struct { - c *linuxContainer -} - -func (b *stoppedState) status() Status { - return Destroyed -} - -func (b *stoppedState) transition(s containerState) error { - switch s.(type) { - case *runningState: - b.c.state = s - return nil - case *restoredState: - b.c.state = s - return nil - case *stoppedState: - return nil - } - return newStateTransitionError(b, s) -} - -func (b *stoppedState) destroy() error { - return destroy(b.c) -} - -// runningState represents a container that is currently running. -type runningState struct { - c *linuxContainer -} - -func (r *runningState) status() Status { - return Running -} - -func (r *runningState) transition(s containerState) error { - switch s.(type) { - case *stoppedState: - running, err := r.c.isRunning() - if err != nil { - return err - } - if running { - return newGenericError(fmt.Errorf("container still running"), ContainerNotStopped) - } - r.c.state = s - return nil - case *pausedState: - r.c.state = s - return nil - case *runningState: - return nil - } - return newStateTransitionError(r, s) -} - -func (r *runningState) destroy() error { - running, err := r.c.isRunning() - if err != nil { - return err - } - if running { - return newGenericError(fmt.Errorf("container is not destroyed"), ContainerNotStopped) - } - return destroy(r.c) -} - -// pausedState represents a container that is currently pause. It cannot be destroyed in a -// paused state and must transition back to running first. -type pausedState struct { - c *linuxContainer -} - -func (p *pausedState) status() Status { - return Paused -} - -func (p *pausedState) transition(s containerState) error { - switch s.(type) { - case *runningState, *stoppedState: - p.c.state = s - return nil - case *pausedState: - return nil - } - return newStateTransitionError(p, s) -} - -func (p *pausedState) destroy() error { - isRunning, err := p.c.isRunning() - if err != nil { - return err - } - if !isRunning { - if err := p.c.cgroupManager.Freeze(configs.Thawed); err != nil { - return err - } - return destroy(p.c) - } - return newGenericError(fmt.Errorf("container is paused"), ContainerPaused) -} - -// restoredState is the same as the running state but also has accociated checkpoint -// information that maybe need destroyed when the container is stopped and destory is called. 
-type restoredState struct { - imageDir string - c *linuxContainer -} - -func (r *restoredState) status() Status { - return Running -} - -func (r *restoredState) transition(s containerState) error { - switch s.(type) { - case *stoppedState: - return nil - case *runningState: - return nil - } - return newStateTransitionError(r, s) -} - -func (r *restoredState) destroy() error { - if _, err := os.Stat(filepath.Join(r.c.root, "checkpoint")); err != nil { - if !os.IsNotExist(err) { - return err - } - } - return destroy(r.c) -} - -// createdState is used whenever a container is restored, loaded, or setting additional -// processes inside and it should not be destroyed when it is exiting. -type createdState struct { - c *linuxContainer - s Status -} - -func (n *createdState) status() Status { - return n.s -} - -func (n *createdState) transition(s containerState) error { - n.c.state = s - return nil -} - -func (n *createdState) destroy() error { - if err := n.c.refreshState(); err != nil { - return err - } - return n.c.state.destroy() -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/state_linux_test.go b/vendor/github.com/opencontainers/runc/libcontainer/state_linux_test.go deleted file mode 100644 index 417d9c22..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/state_linux_test.go +++ /dev/null @@ -1,79 +0,0 @@ -// +build linux - -package libcontainer - -import "testing" - -func TestStateStatus(t *testing.T) { - states := map[containerState]Status{ - &stoppedState{}: Destroyed, - &runningState{}: Running, - &restoredState{}: Running, - &pausedState{}: Paused, - } - for s, status := range states { - if s.status() != status { - t.Fatalf("state returned %s but expected %s", s.status(), status) - } - } -} - -func isStateTransitionError(err error) bool { - _, ok := err.(*stateTransitionError) - return ok -} - -func TestStoppedStateTransition(t *testing.T) { - s := &stoppedState{c: &linuxContainer{}} - valid := []containerState{ - &stoppedState{}, - &runningState{}, - &restoredState{}, - } - for _, v := range valid { - if err := s.transition(v); err != nil { - t.Fatal(err) - } - } - err := s.transition(&pausedState{}) - if err == nil { - t.Fatal("transition to paused state should fail") - } - if !isStateTransitionError(err) { - t.Fatal("expected stateTransitionError") - } -} - -func TestPausedStateTransition(t *testing.T) { - s := &pausedState{c: &linuxContainer{}} - valid := []containerState{ - &pausedState{}, - &runningState{}, - &stoppedState{}, - } - for _, v := range valid { - if err := s.transition(v); err != nil { - t.Fatal(err) - } - } -} - -func TestRestoredStateTransition(t *testing.T) { - s := &restoredState{c: &linuxContainer{}} - valid := []containerState{ - &stoppedState{}, - &runningState{}, - } - for _, v := range valid { - if err := s.transition(v); err != nil { - t.Fatal(err) - } - } - err := s.transition(&createdState{}) - if err == nil { - t.Fatal("transition to created state should fail") - } - if !isStateTransitionError(err) { - t.Fatal("expected stateTransitionError") - } -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats.go b/vendor/github.com/opencontainers/runc/libcontainer/stats.go deleted file mode 100644 index 303e4b94..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/stats.go +++ /dev/null @@ -1,15 +0,0 @@ -package libcontainer - -type NetworkInterface struct { - // Name is the name of the network interface. 
- Name string - - RxBytes uint64 - RxPackets uint64 - RxErrors uint64 - RxDropped uint64 - TxBytes uint64 - TxPackets uint64 - TxErrors uint64 - TxDropped uint64 -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go deleted file mode 100644 index f8d1d689..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go +++ /dev/null @@ -1,5 +0,0 @@ -package libcontainer - -type Stats struct { - Interfaces []*NetworkInterface -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go deleted file mode 100644 index c629dc67..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go +++ /dev/null @@ -1,8 +0,0 @@ -package libcontainer - -import "github.com/opencontainers/runc/libcontainer/cgroups" - -type Stats struct { - Interfaces []*NetworkInterface - CgroupStats *cgroups.Stats -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go deleted file mode 100644 index f8d1d689..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package libcontainer - -type Stats struct { - Interfaces []*NetworkInterface -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/BUILD.bazel b/vendor/github.com/opencontainers/runc/libcontainer/system/BUILD.bazel deleted file mode 100644 index 595b0aa9..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/BUILD.bazel +++ /dev/null @@ -1,84 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "proc.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "sysconfig_notcgo.go", - "unsupported.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "sysconfig_notcgo.go", - "unsupported.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "sysconfig_notcgo.go", - "unsupported.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "sysconfig.go", - "sysconfig_notcgo.go", - "unsupported.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "linux.go", - "setns_linux.go", - "sysconfig.go", - "sysconfig_notcgo.go", - "xattrs_linux.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "sysconfig_notcgo.go", - "unsupported.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "sysconfig_notcgo.go", - "unsupported.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "sysconfig_notcgo.go", - "unsupported.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "sysconfig_notcgo.go", - "unsupported.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "sysconfig_notcgo.go", - "unsupported.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "sysconfig_notcgo.go", - "unsupported.go", - ], - "//conditions:default": [], - }) + select({ - "@io_bazel_rules_go//go/platform:linux_386": [ - "syscall_linux_386.go", - ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ - "syscall_linux_64.go", - ], - "@io_bazel_rules_go//go/platform:linux_arm": [ - "syscall_linux_arm.go", - ], - "@io_bazel_rules_go//go/platform:linux_arm64": [ - "syscall_linux_64.go", - ], - "@io_bazel_rules_go//go/platform:linux_ppc64": [ - "syscall_linux_64.go", - ], - "@io_bazel_rules_go//go/platform:linux_ppc64le": [ - "syscall_linux_64.go", - ], - 
"@io_bazel_rules_go//go/platform:linux_s390x": [ - "syscall_linux_64.go", - ], - "//conditions:default": [], - }), - cgo = True, - importpath = "github.com/opencontainers/runc/libcontainer/system", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go deleted file mode 100644 index 8b199d92..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go +++ /dev/null @@ -1,148 +0,0 @@ -// +build linux - -package system - -import ( - "bufio" - "fmt" - "os" - "os/exec" - "syscall" - "unsafe" -) - -// If arg2 is nonzero, set the "child subreaper" attribute of the -// calling process; if arg2 is zero, unset the attribute. When a -// process is marked as a child subreaper, all of the children -// that it creates, and their descendants, will be marked as -// having a subreaper. In effect, a subreaper fulfills the role -// of init(1) for its descendant processes. Upon termination of -// a process that is orphaned (i.e., its immediate parent has -// already terminated) and marked as having a subreaper, the -// nearest still living ancestor subreaper will receive a SIGCHLD -// signal and be able to wait(2) on the process to discover its -// termination status. -const PR_SET_CHILD_SUBREAPER = 36 - -type ParentDeathSignal int - -func (p ParentDeathSignal) Restore() error { - if p == 0 { - return nil - } - current, err := GetParentDeathSignal() - if err != nil { - return err - } - if p == current { - return nil - } - return p.Set() -} - -func (p ParentDeathSignal) Set() error { - return SetParentDeathSignal(uintptr(p)) -} - -func Execv(cmd string, args []string, env []string) error { - name, err := exec.LookPath(cmd) - if err != nil { - return err - } - - return syscall.Exec(name, args, env) -} - -func Prlimit(pid, resource int, limit syscall.Rlimit) error { - _, _, err := syscall.RawSyscall6(syscall.SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(&limit)), uintptr(unsafe.Pointer(&limit)), 0, 0) - if err != 0 { - return err - } - return nil -} - -func SetParentDeathSignal(sig uintptr) error { - if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, sig, 0); err != 0 { - return err - } - return nil -} - -func GetParentDeathSignal() (ParentDeathSignal, error) { - var sig int - _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0) - if err != 0 { - return -1, err - } - return ParentDeathSignal(sig), nil -} - -func SetKeepCaps() error { - if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 1, 0); err != 0 { - return err - } - - return nil -} - -func ClearKeepCaps() error { - if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 0, 0); err != 0 { - return err - } - - return nil -} - -func Setctty() error { - if _, _, err := syscall.RawSyscall(syscall.SYS_IOCTL, 0, uintptr(syscall.TIOCSCTTY), 0); err != 0 { - return err - } - return nil -} - -/* - * Detect whether we are currently running in a user namespace. 
- * Copied from github.com/lxc/lxd/shared/util.go - */ -func RunningInUserNS() bool { - file, err := os.Open("/proc/self/uid_map") - if err != nil { - /* - * This kernel-provided file only exists if user namespaces are - * supported - */ - return false - } - defer file.Close() - - buf := bufio.NewReader(file) - l, _, err := buf.ReadLine() - if err != nil { - return false - } - - line := string(l) - var a, b, c int64 - fmt.Sscanf(line, "%d %d %d", &a, &b, &c) - /* - * We assume we are in the initial user namespace if we have a full - * range - 4294967295 uids starting at uid 0. - */ - if a == 0 && b == 0 && c == 4294967295 { - return false - } - return true -} - -// SetSubreaper sets the value i as the subreaper setting for the calling process -func SetSubreaper(i int) error { - return Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0) -} - -func Prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) { - _, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go b/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go deleted file mode 100644 index 37808a29..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go +++ /dev/null @@ -1,27 +0,0 @@ -package system - -import ( - "io/ioutil" - "path/filepath" - "strconv" - "strings" -) - -// look in /proc to find the process start time so that we can verify -// that this pid has started after ourself -func GetProcessStartTime(pid int) (string, error) { - data, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat")) - if err != nil { - return "", err - } - - parts := strings.Split(string(data), " ") - // the starttime is located at pos 22 - // from the man page - // - // starttime %llu (was %lu before Linux 2.6) - // (22) The time the process started after system boot. In kernels before Linux 2.6, this - // value was expressed in jiffies. Since Linux 2.6, the value is expressed in clock ticks - // (divide by sysconf(_SC_CLK_TCK)). 
- return parts[22-1], nil // starts at 1 -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go deleted file mode 100644 index 615ff4c8..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go +++ /dev/null @@ -1,40 +0,0 @@ -package system - -import ( - "fmt" - "runtime" - "syscall" -) - -// Via http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=7b21fddd087678a70ad64afc0f632e0f1071b092 -// -// We need different setns values for the different platforms and arch -// We are declaring the macro here because the SETNS syscall does not exist in th stdlib -var setNsMap = map[string]uintptr{ - "linux/386": 346, - "linux/arm64": 268, - "linux/amd64": 308, - "linux/arm": 375, - "linux/ppc": 350, - "linux/ppc64": 350, - "linux/ppc64le": 350, - "linux/s390x": 339, -} - -var sysSetns = setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)] - -func SysSetns() uint32 { - return uint32(sysSetns) -} - -func Setns(fd uintptr, flags uintptr) error { - ns, exists := setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)] - if !exists { - return fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH) - } - _, _, err := syscall.RawSyscall(ns, fd, flags, 0) - if err != 0 { - return err - } - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go deleted file mode 100644 index c9900651..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build linux,386 - -package system - -import ( - "syscall" -) - -// Setuid sets the uid of the calling thread to the specified uid. -func Setuid(uid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// Setgid sets the gid of the calling thread to the specified gid. -func Setgid(gid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go deleted file mode 100644 index 0816bf82..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build linux,arm64 linux,amd64 linux,ppc linux,ppc64 linux,ppc64le linux,s390x - -package system - -import ( - "syscall" -) - -// Setuid sets the uid of the calling thread to the specified uid. -func Setuid(uid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// Setgid sets the gid of the calling thread to the specified gid. 
-func Setgid(gid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID, uintptr(gid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go deleted file mode 100644 index 3f780f31..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build linux,arm - -package system - -import ( - "syscall" -) - -// Setuid sets the uid of the calling thread to the specified uid. -func Setuid(uid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID32, uintptr(uid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// Setgid sets the gid of the calling thread to the specified gid. -func Setgid(gid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go deleted file mode 100644 index b3a07cba..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build cgo,linux cgo,freebsd - -package system - -/* -#include -*/ -import "C" - -func GetClockTicks() int { - return int(C.sysconf(C._SC_CLK_TCK)) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go deleted file mode 100644 index d93b5d5f..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !cgo windows - -package system - -func GetClockTicks() int { - // TODO figure out a better alternative for platforms where we're missing cgo - // - // TODO Windows. This could be implemented using Win32 QueryPerformanceFrequency(). - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms644905(v=vs.85).aspx - // - // An example of its usage can be found here. 
- // https://msdn.microsoft.com/en-us/library/windows/desktop/dn553408(v=vs.85).aspx - - return 100 -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go deleted file mode 100644 index e7cfd62b..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !linux - -package system - -// RunningInUserNS is a stub for non-Linux systems -// Always returns false -func RunningInUserNS() bool { - return false -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go deleted file mode 100644 index 30f74dfb..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go +++ /dev/null @@ -1,99 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -var _zero uintptr - -// Returns the size of xattrs and nil error -// Requires path, takes allocated []byte or nil as last argument -func Llistxattr(path string, dest []byte) (size int, err error) { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return -1, err - } - var newpathBytes unsafe.Pointer - if len(dest) > 0 { - newpathBytes = unsafe.Pointer(&dest[0]) - } else { - newpathBytes = unsafe.Pointer(&_zero) - } - - _size, _, errno := syscall.Syscall6(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(newpathBytes), uintptr(len(dest)), 0, 0, 0) - size = int(_size) - if errno != 0 { - return -1, errno - } - - return size, nil -} - -// Returns a []byte slice if the xattr is set and nil otherwise -// Requires path and its attribute as arguments -func Lgetxattr(path string, attr string) ([]byte, error) { - var sz int - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return nil, err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return nil, err - } - - // Start with a 128 length byte array - sz = 128 - dest := make([]byte, sz) - destBytes := unsafe.Pointer(&dest[0]) - _sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - - switch { - case errno == syscall.ENODATA: - return nil, errno - case errno == syscall.ENOTSUP: - return nil, errno - case errno == syscall.ERANGE: - // 128 byte array might just not be good enough, - // A dummy buffer is used ``uintptr(0)`` to get real size - // of the xattrs on disk - _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(unsafe.Pointer(nil)), uintptr(0), 0, 0) - sz = int(_sz) - if sz < 0 { - return nil, errno - } - dest = make([]byte, sz) - destBytes := unsafe.Pointer(&dest[0]) - _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - if errno != 0 { - return nil, errno - } - case errno != 0: - return nil, errno - } - sz = int(_sz) - return dest[:sz], nil -} - -func Lsetxattr(path string, attr string, data []byte, flags int) error { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return err - } - var dataBytes unsafe.Pointer - if len(data) > 0 { - dataBytes = unsafe.Pointer(&data[0]) - } 
else { - dataBytes = unsafe.Pointer(&_zero) - } - _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) - if errno != 0 { - return errno - } - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/BUILD.bazel b/vendor/github.com/opencontainers/runc/libcontainer/user/BUILD.bazel deleted file mode 100644 index 4ab2319a..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/BUILD.bazel +++ /dev/null @@ -1,53 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "lookup.go", - "user.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "lookup_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "lookup_unix.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "lookup_unix.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "lookup_unix.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "lookup_unix.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "lookup_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "lookup_unix.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "lookup_unix.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "lookup_unsupported.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "lookup_unix.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "lookup_unsupported.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/opencontainers/runc/libcontainer/user", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = ["user_test.go"], - embed = [":go_default_library"], - importpath = "github.com/opencontainers/runc/libcontainer/user", -) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS b/vendor/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS deleted file mode 100644 index edbe2006..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS +++ /dev/null @@ -1,2 +0,0 @@ -Tianon Gravi (@tianon) -Aleksa Sarai (@cyphar) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user_test.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user_test.go deleted file mode 100644 index 8cad2f55..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/user_test.go +++ /dev/null @@ -1,500 +0,0 @@ -package user - -import ( - "io" - "reflect" - "sort" - "strconv" - "strings" - "testing" -) - -func TestUserParseLine(t *testing.T) { - var ( - a, b string - c []string - d int - ) - - parseLine("", &a, &b) - if a != "" || b != "" { - t.Fatalf("a and b should be empty ('%v', '%v')", a, b) - } - - parseLine("a", &a, &b) - if a != "a" || b != "" { - t.Fatalf("a should be 'a' and b should be empty ('%v', '%v')", a, b) - } - - parseLine("bad boys:corny cows", &a, &b) - if a != "bad boys" || b != "corny cows" { - t.Fatalf("a should be 'bad boys' and b should be 'corny cows' ('%v', '%v')", a, b) - } - - parseLine("", &c) - if len(c) != 0 { - t.Fatalf("c should be empty (%#v)", c) - } - - parseLine("d,e,f:g:h:i,j,k", &c, &a, &b, &c) - if a != "g" || b != "h" || len(c) != 3 || c[0] != "i" || c[1] != "j" || c[2] != "k" { - t.Fatalf("a should be 'g', b should be 'h', and c should be ['i','j','k'] ('%v', '%v', '%#v')", a, b, c) - } - - parseLine("::::::::::", &a, &b, &c) - if a != "" || b != "" 
|| len(c) != 0 { - t.Fatalf("a, b, and c should all be empty ('%v', '%v', '%#v')", a, b, c) - } - - parseLine("not a number", &d) - if d != 0 { - t.Fatalf("d should be 0 (%v)", d) - } - - parseLine("b:12:c", &a, &d, &b) - if a != "b" || b != "c" || d != 12 { - t.Fatalf("a should be 'b' and b should be 'c', and d should be 12 ('%v', '%v', %v)", a, b, d) - } -} - -func TestUserParsePasswd(t *testing.T) { - users, err := ParsePasswdFilter(strings.NewReader(` -root:x:0:0:root:/root:/bin/bash -adm:x:3:4:adm:/var/adm:/bin/false -this is just some garbage data -`), nil) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if len(users) != 3 { - t.Fatalf("Expected 3 users, got %v", len(users)) - } - if users[0].Uid != 0 || users[0].Name != "root" { - t.Fatalf("Expected users[0] to be 0 - root, got %v - %v", users[0].Uid, users[0].Name) - } - if users[1].Uid != 3 || users[1].Name != "adm" { - t.Fatalf("Expected users[1] to be 3 - adm, got %v - %v", users[1].Uid, users[1].Name) - } -} - -func TestUserParseGroup(t *testing.T) { - groups, err := ParseGroupFilter(strings.NewReader(` -root:x:0:root -adm:x:4:root,adm,daemon -this is just some garbage data -`), nil) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if len(groups) != 3 { - t.Fatalf("Expected 3 groups, got %v", len(groups)) - } - if groups[0].Gid != 0 || groups[0].Name != "root" || len(groups[0].List) != 1 { - t.Fatalf("Expected groups[0] to be 0 - root - 1 member, got %v - %v - %v", groups[0].Gid, groups[0].Name, len(groups[0].List)) - } - if groups[1].Gid != 4 || groups[1].Name != "adm" || len(groups[1].List) != 3 { - t.Fatalf("Expected groups[1] to be 4 - adm - 3 members, got %v - %v - %v", groups[1].Gid, groups[1].Name, len(groups[1].List)) - } -} - -func TestValidGetExecUser(t *testing.T) { - const passwdContent = ` -root:x:0:0:root user:/root:/bin/bash -adm:x:42:43:adm:/var/adm:/bin/false -111:x:222:333::/var/garbage -odd:x:111:112::/home/odd::::: -this is just some garbage data -` - const groupContent = ` -root:x:0:root -adm:x:43: -grp:x:1234:root,adm -444:x:555:111 -odd:x:444: -this is just some garbage data -` - defaultExecUser := ExecUser{ - Uid: 8888, - Gid: 8888, - Sgids: []int{8888}, - Home: "/8888", - } - - tests := []struct { - ref string - expected ExecUser - }{ - { - ref: "root", - expected: ExecUser{ - Uid: 0, - Gid: 0, - Sgids: []int{0, 1234}, - Home: "/root", - }, - }, - { - ref: "adm", - expected: ExecUser{ - Uid: 42, - Gid: 43, - Sgids: []int{1234}, - Home: "/var/adm", - }, - }, - { - ref: "root:adm", - expected: ExecUser{ - Uid: 0, - Gid: 43, - Sgids: defaultExecUser.Sgids, - Home: "/root", - }, - }, - { - ref: "adm:1234", - expected: ExecUser{ - Uid: 42, - Gid: 1234, - Sgids: defaultExecUser.Sgids, - Home: "/var/adm", - }, - }, - { - ref: "42:1234", - expected: ExecUser{ - Uid: 42, - Gid: 1234, - Sgids: defaultExecUser.Sgids, - Home: "/var/adm", - }, - }, - { - ref: "1337:1234", - expected: ExecUser{ - Uid: 1337, - Gid: 1234, - Sgids: defaultExecUser.Sgids, - Home: defaultExecUser.Home, - }, - }, - { - ref: "1337", - expected: ExecUser{ - Uid: 1337, - Gid: defaultExecUser.Gid, - Sgids: defaultExecUser.Sgids, - Home: defaultExecUser.Home, - }, - }, - { - ref: "", - expected: ExecUser{ - Uid: defaultExecUser.Uid, - Gid: defaultExecUser.Gid, - Sgids: defaultExecUser.Sgids, - Home: defaultExecUser.Home, - }, - }, - - // Regression tests for #695. 
- { - ref: "111", - expected: ExecUser{ - Uid: 111, - Gid: 112, - Sgids: defaultExecUser.Sgids, - Home: "/home/odd", - }, - }, - { - ref: "111:444", - expected: ExecUser{ - Uid: 111, - Gid: 444, - Sgids: defaultExecUser.Sgids, - Home: "/home/odd", - }, - }, - } - - for _, test := range tests { - passwd := strings.NewReader(passwdContent) - group := strings.NewReader(groupContent) - - execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group) - if err != nil { - t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error()) - t.Fail() - continue - } - - if !reflect.DeepEqual(test.expected, *execUser) { - t.Logf("ref: %v", test.ref) - t.Logf("got: %#v", execUser) - t.Logf("expected: %#v", test.expected) - t.Fail() - continue - } - } -} - -func TestInvalidGetExecUser(t *testing.T) { - const passwdContent = ` -root:x:0:0:root user:/root:/bin/bash -adm:x:42:43:adm:/var/adm:/bin/false --42:x:12:13:broken:/very/broken -this is just some garbage data -` - const groupContent = ` -root:x:0:root -adm:x:43: -grp:x:1234:root,adm -this is just some garbage data -` - - tests := []string{ - // No such user/group. - "notuser", - "notuser:notgroup", - "root:notgroup", - "notuser:adm", - "8888:notgroup", - "notuser:8888", - - // Invalid user/group values. - "-1:0", - "0:-3", - "-5:-2", - "-42", - "-43", - } - - for _, test := range tests { - passwd := strings.NewReader(passwdContent) - group := strings.NewReader(groupContent) - - execUser, err := GetExecUser(test, nil, passwd, group) - if err == nil { - t.Logf("got unexpected success when parsing '%s': %#v", test, execUser) - t.Fail() - continue - } - } -} - -func TestGetExecUserNilSources(t *testing.T) { - const passwdContent = ` -root:x:0:0:root user:/root:/bin/bash -adm:x:42:43:adm:/var/adm:/bin/false -this is just some garbage data -` - const groupContent = ` -root:x:0:root -adm:x:43: -grp:x:1234:root,adm -this is just some garbage data -` - - defaultExecUser := ExecUser{ - Uid: 8888, - Gid: 8888, - Sgids: []int{8888}, - Home: "/8888", - } - - tests := []struct { - ref string - passwd, group bool - expected ExecUser - }{ - { - ref: "", - passwd: false, - group: false, - expected: ExecUser{ - Uid: 8888, - Gid: 8888, - Sgids: []int{8888}, - Home: "/8888", - }, - }, - { - ref: "root", - passwd: true, - group: false, - expected: ExecUser{ - Uid: 0, - Gid: 0, - Sgids: []int{8888}, - Home: "/root", - }, - }, - { - ref: "0", - passwd: false, - group: false, - expected: ExecUser{ - Uid: 0, - Gid: 8888, - Sgids: []int{8888}, - Home: "/8888", - }, - }, - { - ref: "0:0", - passwd: false, - group: false, - expected: ExecUser{ - Uid: 0, - Gid: 0, - Sgids: []int{8888}, - Home: "/8888", - }, - }, - } - - for _, test := range tests { - var passwd, group io.Reader - - if test.passwd { - passwd = strings.NewReader(passwdContent) - } - - if test.group { - group = strings.NewReader(groupContent) - } - - execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group) - if err != nil { - t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error()) - t.Fail() - continue - } - - if !reflect.DeepEqual(test.expected, *execUser) { - t.Logf("got: %#v", execUser) - t.Logf("expected: %#v", test.expected) - t.Fail() - continue - } - } -} - -func TestGetAdditionalGroups(t *testing.T) { - const groupContent = ` -root:x:0:root -adm:x:43: -grp:x:1234:root,adm -adm:x:4343:root,adm-duplicate -this is just some garbage data -` - tests := []struct { - groups []string - expected []int - hasError bool - }{ - { - // empty group - groups: 
[]string{}, - expected: []int{}, - }, - { - // single group - groups: []string{"adm"}, - expected: []int{43}, - }, - { - // multiple groups - groups: []string{"adm", "grp"}, - expected: []int{43, 1234}, - }, - { - // invalid group - groups: []string{"adm", "grp", "not-exist"}, - expected: nil, - hasError: true, - }, - { - // group with numeric id - groups: []string{"43"}, - expected: []int{43}, - }, - { - // group with unknown numeric id - groups: []string{"adm", "10001"}, - expected: []int{43, 10001}, - }, - { - // groups specified twice with numeric and name - groups: []string{"adm", "43"}, - expected: []int{43}, - }, - { - // groups with too small id - groups: []string{"-1"}, - expected: nil, - hasError: true, - }, - { - // groups with too large id - groups: []string{strconv.Itoa(1 << 31)}, - expected: nil, - hasError: true, - }, - } - - for _, test := range tests { - group := strings.NewReader(groupContent) - - gids, err := GetAdditionalGroups(test.groups, group) - if test.hasError && err == nil { - t.Errorf("Parse(%#v) expects error but has none", test) - continue - } - if !test.hasError && err != nil { - t.Errorf("Parse(%#v) has error %v", test, err) - continue - } - sort.Sort(sort.IntSlice(gids)) - if !reflect.DeepEqual(gids, test.expected) { - t.Errorf("Gids(%v), expect %v from groups %v", gids, test.expected, test.groups) - } - } -} - -func TestGetAdditionalGroupsNumeric(t *testing.T) { - tests := []struct { - groups []string - expected []int - hasError bool - }{ - { - // numeric groups only - groups: []string{"1234", "5678"}, - expected: []int{1234, 5678}, - }, - { - // numeric and alphabetic - groups: []string{"1234", "fake"}, - expected: nil, - hasError: true, - }, - } - - for _, test := range tests { - gids, err := GetAdditionalGroups(test.groups, nil) - if test.hasError && err == nil { - t.Errorf("Parse(%#v) expects error but has none", test) - continue - } - if !test.hasError && err != nil { - t.Errorf("Parse(%#v) has error %v", test, err) - continue - } - sort.Sort(sort.IntSlice(gids)) - if !reflect.DeepEqual(gids, test.expected) { - t.Errorf("Gids(%v), expect %v from groups %v", gids, test.expected, test.groups) - } - } -} diff --git a/vendor/github.com/opencontainers/runc/list.go b/vendor/github.com/opencontainers/runc/list.go deleted file mode 100644 index 1750ef81..00000000 --- a/vendor/github.com/opencontainers/runc/list.go +++ /dev/null @@ -1,124 +0,0 @@ -// +build linux - -package main - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "text/tabwriter" - "time" - - "encoding/json" - - "github.com/codegangsta/cli" - "github.com/opencontainers/runc/libcontainer/utils" -) - -const formatOptions = `table or json` - -// containerState represents the platform agnostic pieces relating to a -// running container's status and state -type containerState struct { - // ID is the container ID - ID string `json:"id"` - // InitProcessPid is the init process id in the parent namespace - InitProcessPid int `json:"pid"` - // Status is the current status of the container, running, paused, ... - Status string `json:"status"` - // Bundle is the path on the filesystem to the bundle - Bundle string `json:"bundle"` - // Created is the unix timestamp for the creation time of the container in UTC - Created time.Time `json:"created"` -} - -var listCommand = cli.Command{ - Name: "list", - Usage: "lists containers started by runc with the given root", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "format, f", - Value: "", - Usage: `select one of: ` + formatOptions + `. 
- -The default format is table. The following will output the list of containers -in json format: - - # runc list -f json`, - }, - }, - Action: func(context *cli.Context) { - s, err := getContainers(context) - if err != nil { - fatal(err) - } - - switch context.String("format") { - case "", "table": - w := tabwriter.NewWriter(os.Stdout, 12, 1, 3, ' ', 0) - fmt.Fprint(w, "ID\tPID\tSTATUS\tBUNDLE\tCREATED\n") - for _, item := range s { - fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\n", - item.ID, - item.InitProcessPid, - item.Status, - item.Bundle, - item.Created.Format(time.RFC3339Nano)) - } - if err := w.Flush(); err != nil { - fatal(err) - } - case "json": - data, err := json.Marshal(s) - if err != nil { - fatal(err) - } - os.Stdout.Write(data) - - default: - fatalf("invalid format option") - } - }, -} - -func getContainers(context *cli.Context) ([]containerState, error) { - factory, err := loadFactory(context) - if err != nil { - return nil, err - } - root := context.GlobalString("root") - absRoot, err := filepath.Abs(root) - if err != nil { - return nil, err - } - list, err := ioutil.ReadDir(absRoot) - if err != nil { - fatal(err) - } - - var s []containerState - for _, item := range list { - if item.IsDir() { - container, err := factory.Load(item.Name()) - if err != nil { - return nil, err - } - containerStatus, err := container.Status() - if err != nil { - return nil, err - } - state, err := container.State() - if err != nil { - return nil, err - } - s = append(s, containerState{ - ID: state.BaseState.ID, - InitProcessPid: state.BaseState.InitProcessPid, - Status: containerStatus.String(), - Bundle: utils.SearchLabels(state.Config.Labels, "bundle"), - Created: state.BaseState.Created}) - } - } - return s, nil -} diff --git a/vendor/github.com/opencontainers/runc/main.go b/vendor/github.com/opencontainers/runc/main.go deleted file mode 100644 index 3524937f..00000000 --- a/vendor/github.com/opencontainers/runc/main.go +++ /dev/null @@ -1,126 +0,0 @@ -package main - -import ( - "fmt" - "os" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/codegangsta/cli" - "github.com/opencontainers/runtime-spec/specs-go" -) - -// gitCommit will be the hash that the binary was built from -// and will be populated by the Makefile -var gitCommit = "" - -const ( - version = "0.1.1" - specConfig = "config.json" - usage = `Open Container Initiative runtime - -runc is a command line client for running applications packaged according to -the Open Container Format (OCF) and is a compliant implementation of the -Open Container Initiative specification. - -runc integrates well with existing process supervisors to provide a production -container runtime environment for applications. It can be used with your -existing process monitoring tools and the container will be spawned as a -direct child of the process supervisor. - -Containers are configured using bundles. A bundle for a container is a directory -that includes a specification file named "` + specConfig + `" and a root filesystem. -The root filesystem contains the contents of the container. - -To start a new instance of a container: - - # runc start [ -b bundle ] - -Where "" is your name for the instance of the container that you -are starting. The name you provide for the container instance must be unique on -your host. Providing the bundle directory using "-b" is optional. 
The default -value for "bundle" is the current directory.` -) - -func main() { - app := cli.NewApp() - app.Name = "runc" - app.Usage = usage - v := []string{ - version, - } - if gitCommit != "" { - v = append(v, fmt.Sprintf("commit: %s", gitCommit)) - } - v = append(v, fmt.Sprintf("spec: %s", specs.Version)) - app.Version = strings.Join(v, "\n") - app.Flags = []cli.Flag{ - cli.BoolFlag{ - Name: "debug", - Usage: "enable debug output for logging", - }, - cli.StringFlag{ - Name: "log", - Value: "/dev/null", - Usage: "set the log file path where internal debug information is written", - }, - cli.StringFlag{ - Name: "log-format", - Value: "text", - Usage: "set the format used by logs ('text' (default), or 'json')", - }, - cli.StringFlag{ - Name: "root", - Value: "/run/runc", - Usage: "root directory for storage of container state (this should be located in tmpfs)", - }, - cli.StringFlag{ - Name: "criu", - Value: "criu", - Usage: "path to the criu binary used for checkpoint and restore", - }, - cli.BoolFlag{ - Name: "systemd-cgroup", - Usage: "enable systemd cgroup support, expects cgroupsPath to be of form \"slice:prefix:name\" for e.g. \"system.slice:runc:434234\"", - }, - } - app.Commands = []cli.Command{ - checkpointCommand, - deleteCommand, - eventsCommand, - execCommand, - initCommand, - killCommand, - listCommand, - pauseCommand, - restoreCommand, - resumeCommand, - specCommand, - startCommand, - stateCommand, - } - app.Before = func(context *cli.Context) error { - if context.GlobalBool("debug") { - logrus.SetLevel(logrus.DebugLevel) - } - if path := context.GlobalString("log"); path != "" { - f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND|os.O_SYNC, 0666) - if err != nil { - return err - } - logrus.SetOutput(f) - } - switch context.GlobalString("log-format") { - case "text": - // retain logrus's default. - case "json": - logrus.SetFormatter(new(logrus.JSONFormatter)) - default: - logrus.Fatalf("unknown log-format %q", context.GlobalString("log-format")) - } - return nil - } - if err := app.Run(os.Args); err != nil { - fatal(err) - } -} diff --git a/vendor/github.com/opencontainers/runc/main_unix.go b/vendor/github.com/opencontainers/runc/main_unix.go deleted file mode 100644 index 7bbec9fa..00000000 --- a/vendor/github.com/opencontainers/runc/main_unix.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build linux - -package main - -import _ "github.com/opencontainers/runc/libcontainer/nsenter" diff --git a/vendor/github.com/opencontainers/runc/main_unsupported.go b/vendor/github.com/opencontainers/runc/main_unsupported.go deleted file mode 100644 index 32ce1ac9..00000000 --- a/vendor/github.com/opencontainers/runc/main_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !linux - -package main - -import "github.com/codegangsta/cli" - -var ( - checkpointCommand cli.Command - eventsCommand cli.Command - restoreCommand cli.Command - specCommand cli.Command - killCommand cli.Command -) diff --git a/vendor/github.com/opencontainers/runc/pause.go b/vendor/github.com/opencontainers/runc/pause.go deleted file mode 100644 index ccec7bbe..00000000 --- a/vendor/github.com/opencontainers/runc/pause.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build linux - -package main - -import "github.com/codegangsta/cli" - -var pauseCommand = cli.Command{ - Name: "pause", - Usage: "pause suspends all processes inside the container", - ArgsUsage: ` - -Where "" is the name for the instance of the container to be -paused. 
`, - Description: `The pause command suspends all processes in the instance of the container. - -Use runc list to identiy instances of containers and their current status.`, - Action: func(context *cli.Context) { - container, err := getContainer(context) - if err != nil { - fatal(err) - } - if err := container.Pause(); err != nil { - fatal(err) - } - }, -} - -var resumeCommand = cli.Command{ - Name: "resume", - Usage: "resumes all processes that have been previously paused", - ArgsUsage: ` - -Where "" is the name for the instance of the container to be -resumed.`, - Description: `The resume command resumes all processes in the instance of the container. - -Use runc list to identiy instances of containers and their current status.`, - Action: func(context *cli.Context) { - container, err := getContainer(context) - if err != nil { - fatal(err) - } - if err := container.Resume(); err != nil { - fatal(err) - } - }, -} diff --git a/vendor/github.com/opencontainers/runc/restore.go b/vendor/github.com/opencontainers/runc/restore.go deleted file mode 100644 index 13f3b6d2..00000000 --- a/vendor/github.com/opencontainers/runc/restore.go +++ /dev/null @@ -1,191 +0,0 @@ -// +build linux - -package main - -import ( - "os" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/codegangsta/cli" - "github.com/opencontainers/runc/libcontainer" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/specconv" - "github.com/opencontainers/runtime-spec/specs-go" -) - -var restoreCommand = cli.Command{ - Name: "restore", - Usage: "restore a container from a previous checkpoint", - ArgsUsage: ` - -Where "" is the name for the instance of the container to be -restored.`, - Description: `Restores the saved state of the container instance that was previously saved -using the runc checkpoint command.`, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "image-path", - Value: "", - Usage: "path to criu image files for restoring", - }, - cli.StringFlag{ - Name: "work-path", - Value: "", - Usage: "path for saving work files and logs", - }, - cli.BoolFlag{ - Name: "tcp-established", - Usage: "allow open tcp connections", - }, - cli.BoolFlag{ - Name: "ext-unix-sk", - Usage: "allow external unix sockets", - }, - cli.BoolFlag{ - Name: "shell-job", - Usage: "allow shell jobs", - }, - cli.BoolFlag{ - Name: "file-locks", - Usage: "handle file locks, for safety", - }, - cli.StringFlag{ - Name: "manage-cgroups-mode", - Value: "", - Usage: "cgroups mode: 'soft' (default), 'full' and 'strict'.", - }, - cli.StringFlag{ - Name: "bundle, b", - Value: "", - Usage: "path to the root of the bundle directory", - }, - cli.BoolFlag{ - Name: "detach,d", - Usage: "detach from the container's process", - }, - cli.StringFlag{ - Name: "pid-file", - Value: "", - Usage: "specify the file to write the process id to", - }, - cli.BoolFlag{ - Name: "no-subreaper", - Usage: "disable the use of the subreaper used to reap reparented processes", - }, - cli.BoolFlag{ - Name: "no-pivot", - Usage: "do not use pivot root to jail process inside rootfs. 
This should be used whenever the rootfs is on top of a ramdisk", - }, - }, - Action: func(context *cli.Context) { - imagePath := context.String("image-path") - id := context.Args().First() - if id == "" { - fatal(errEmptyID) - } - if imagePath == "" { - imagePath = getDefaultImagePath(context) - } - bundle := context.String("bundle") - if bundle != "" { - if err := os.Chdir(bundle); err != nil { - fatal(err) - } - } - spec, err := loadSpec(specConfig) - if err != nil { - fatal(err) - } - config, err := specconv.CreateLibcontainerConfig(&specconv.CreateOpts{ - CgroupName: id, - UseSystemdCgroup: context.GlobalBool("systemd-cgroup"), - NoPivotRoot: context.Bool("no-pivot"), - Spec: spec, - }) - if err != nil { - fatal(err) - } - status, err := restoreContainer(context, spec, config, imagePath) - if err != nil { - fatal(err) - } - os.Exit(status) - }, -} - -func restoreContainer(context *cli.Context, spec *specs.Spec, config *configs.Config, imagePath string) (code int, err error) { - var ( - rootuid = 0 - id = context.Args().First() - ) - factory, err := loadFactory(context) - if err != nil { - return -1, err - } - container, err := factory.Load(id) - if err != nil { - container, err = factory.Create(id, config) - if err != nil { - return -1, err - } - } - options := criuOptions(context) - - status, err := container.Status() - if err != nil { - logrus.Error(err) - } - if status == libcontainer.Running { - fatalf("Container with id %s already running", id) - } - - setManageCgroupsMode(context, options) - - // ensure that the container is always removed if we were the process - // that created it. - detach := context.Bool("detach") - if !detach { - defer destroy(container) - } - process := &libcontainer.Process{} - tty, err := setupIO(process, rootuid, "", false, detach) - if err != nil { - return -1, err - } - defer tty.Close() - handler := newSignalHandler(tty, !context.Bool("no-subreaper")) - if err := container.Restore(process, options); err != nil { - return -1, err - } - if err := tty.ClosePostStart(); err != nil { - return -1, err - } - if pidFile := context.String("pid-file"); pidFile != "" { - if err := createPidFile(pidFile, process); err != nil { - process.Signal(syscall.SIGKILL) - process.Wait() - return -1, err - } - } - if detach { - return 0, nil - } - return handler.forward(process) -} - -func criuOptions(context *cli.Context) *libcontainer.CriuOpts { - imagePath := getCheckpointImagePath(context) - if err := os.MkdirAll(imagePath, 0655); err != nil { - fatal(err) - } - return &libcontainer.CriuOpts{ - ImagesDirectory: imagePath, - WorkDirectory: context.String("work-path"), - LeaveRunning: context.Bool("leave-running"), - TcpEstablished: context.Bool("tcp-established"), - ExternalUnixConnections: context.Bool("ext-unix-sk"), - ShellJob: context.Bool("shell-job"), - FileLocks: context.Bool("file-locks"), - } -} diff --git a/vendor/github.com/opencontainers/runc/rlimit_linux.go b/vendor/github.com/opencontainers/runc/rlimit_linux.go deleted file mode 100644 index 0de8b0b2..00000000 --- a/vendor/github.com/opencontainers/runc/rlimit_linux.go +++ /dev/null @@ -1,49 +0,0 @@ -package main - -import "fmt" - -const ( - RLIMIT_CPU = iota // CPU time in sec - RLIMIT_FSIZE // Maximum filesize - RLIMIT_DATA // max data size - RLIMIT_STACK // max stack size - RLIMIT_CORE // max core file size - RLIMIT_RSS // max resident set size - RLIMIT_NPROC // max number of processes - RLIMIT_NOFILE // max number of open files - RLIMIT_MEMLOCK // max locked-in-memory address space - RLIMIT_AS // 
address space limit - RLIMIT_LOCKS // maximum file locks held - RLIMIT_SIGPENDING // max number of pending signals - RLIMIT_MSGQUEUE // maximum bytes in POSIX mqueues - RLIMIT_NICE // max nice prio allowed to raise to - RLIMIT_RTPRIO // maximum realtime priority - RLIMIT_RTTIME // timeout for RT tasks in us -) - -var rlimitMap = map[string]int{ - "RLIMIT_CPU": RLIMIT_CPU, - "RLIMIT_FSIZE": RLIMIT_FSIZE, - "RLIMIT_DATA": RLIMIT_DATA, - "RLIMIT_STACK": RLIMIT_STACK, - "RLIMIT_CORE": RLIMIT_CORE, - "RLIMIT_RSS": RLIMIT_RSS, - "RLIMIT_NPROC": RLIMIT_NPROC, - "RLIMIT_NOFILE": RLIMIT_NOFILE, - "RLIMIT_MEMLOCK": RLIMIT_MEMLOCK, - "RLIMIT_AS": RLIMIT_AS, - "RLIMIT_LOCKS": RLIMIT_LOCKS, - "RLIMIT_SIGPENDING": RLIMIT_SIGPENDING, - "RLIMIT_MSGQUEUE": RLIMIT_MSGQUEUE, - "RLIMIT_NICE": RLIMIT_NICE, - "RLIMIT_RTPRIO": RLIMIT_RTPRIO, - "RLIMIT_RTTIME": RLIMIT_RTTIME, -} - -func strToRlimit(key string) (int, error) { - rl, ok := rlimitMap[key] - if !ok { - return 0, fmt.Errorf("Wrong rlimit value: %s", key) - } - return rl, nil -} diff --git a/vendor/github.com/opencontainers/runc/signals.go b/vendor/github.com/opencontainers/runc/signals.go deleted file mode 100644 index 5eee44e7..00000000 --- a/vendor/github.com/opencontainers/runc/signals.go +++ /dev/null @@ -1,116 +0,0 @@ -// +build linux - -package main - -import ( - "os" - "os/signal" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/opencontainers/runc/libcontainer" - "github.com/opencontainers/runc/libcontainer/system" - "github.com/opencontainers/runc/libcontainer/utils" -) - -const signalBufferSize = 2048 - -// newSignalHandler returns a signal handler for processing SIGCHLD and SIGWINCH signals -// while still forwarding all other signals to the process. -func newSignalHandler(tty *tty, enableSubreaper bool) *signalHandler { - if enableSubreaper { - // set us as the subreaper before registering the signal handler for the container - if err := system.SetSubreaper(1); err != nil { - logrus.Warn(err) - } - } - // ensure that we have a large buffer size so that we do not miss any signals - // incase we are not processing them fast enough. - s := make(chan os.Signal, signalBufferSize) - // handle all signals for the process. - signal.Notify(s) - return &signalHandler{ - tty: tty, - signals: s, - } -} - -// exit models a process exit status with the pid and -// exit status. -type exit struct { - pid int - status int -} - -type signalHandler struct { - signals chan os.Signal - tty *tty -} - -// forward handles the main signal event loop forwarding, resizing, or reaping depending -// on the signal received. -func (h *signalHandler) forward(process *libcontainer.Process) (int, error) { - // make sure we know the pid of our main process so that we can return - // after it dies. - pid1, err := process.Pid() - if err != nil { - return -1, err - } - // perform the initial tty resize. - h.tty.resize() - for s := range h.signals { - switch s { - case syscall.SIGWINCH: - h.tty.resize() - case syscall.SIGCHLD: - exits, err := h.reap() - if err != nil { - logrus.Error(err) - } - for _, e := range exits { - logrus.WithFields(logrus.Fields{ - "pid": e.pid, - "status": e.status, - }).Debug("process exited") - if e.pid == pid1 { - // call Wait() on the process even though we already have the exit - // status because we must ensure that any of the go specific process - // fun such as flushing pipes are complete before we return. 
- process.Wait() - return e.status, nil - } - } - default: - logrus.Debugf("sending signal to process %s", s) - if err := syscall.Kill(pid1, s.(syscall.Signal)); err != nil { - logrus.Error(err) - } - } - } - return -1, nil -} - -// reap runs wait4 in a loop until we have finished processing any existing exits -// then returns all exits to the main event loop for further processing. -func (h *signalHandler) reap() (exits []exit, err error) { - var ( - ws syscall.WaitStatus - rus syscall.Rusage - ) - for { - pid, err := syscall.Wait4(-1, &ws, syscall.WNOHANG, &rus) - if err != nil { - if err == syscall.ECHILD { - return exits, nil - } - return nil, err - } - if pid <= 0 { - return exits, nil - } - exits = append(exits, exit{ - pid: pid, - status: utils.ExitStatus(ws), - }) - } -} diff --git a/vendor/github.com/opencontainers/runc/spec.go b/vendor/github.com/opencontainers/runc/spec.go deleted file mode 100644 index 69ea1e50..00000000 --- a/vendor/github.com/opencontainers/runc/spec.go +++ /dev/null @@ -1,249 +0,0 @@ -// +build linux - -package main - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "runtime" - - "github.com/codegangsta/cli" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runtime-spec/specs-go" -) - -var specCommand = cli.Command{ - Name: "spec", - Usage: "create a new specification file", - ArgsUsage: "", - Description: `The spec command creates the new specification file named "` + specConfig + `" for -the bundle. - -The spec generated is just a starter file. Editing of the spec is required to -achieve desired results. For example, the newly generated spec includes an args -parameter that is initially set to call the "sh" command when the container is -started. Calling "sh" may work for an ubuntu container or busybox, but will not -work for containers that do not include the "sh" program. - -EXAMPLE: - To run docker's hello-world container one needs to set the args parameter -in the spec to call hello. This can be done using the sed command or a text -editor. The following commands create a bundle for hello-world, change the -default args parameter in the spec from "sh" to "/hello", then run the hello -command in a new hello-world container named container1: - - mkdir hello - cd hello - docker pull hello-world - docker export $(docker create hello-world) > hello-world.tar - mkdir rootfs - tar -C rootfs -xf hello-world.tar - runc spec - sed -i 's;"sh";"/hello";' ` + specConfig + ` - runc start container1 - -In the start command above, "container1" is the name for the instance of the -container that you are starting. The name you provide for the container instance -must be unique on your host. - -When starting a container through runc, runc needs root privilege. If not -already running as root, you can use sudo to give runc root privilege. 
For -example: "sudo runc start container1" will give runc root privilege to start the -container on your host.`, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "bundle, b", - Value: "", - Usage: "path to the root of the bundle directory", - }, - }, - Action: func(context *cli.Context) { - spec := specs.Spec{ - Version: specs.Version, - Platform: specs.Platform{ - OS: runtime.GOOS, - Arch: runtime.GOARCH, - }, - Root: specs.Root{ - Path: "rootfs", - Readonly: true, - }, - Process: specs.Process{ - Terminal: true, - User: specs.User{}, - Args: []string{ - "sh", - }, - Env: []string{ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "TERM=xterm", - }, - Cwd: "/", - NoNewPrivileges: true, - Capabilities: []string{ - "CAP_AUDIT_WRITE", - "CAP_KILL", - "CAP_NET_BIND_SERVICE", - }, - Rlimits: []specs.Rlimit{ - { - Type: "RLIMIT_NOFILE", - Hard: uint64(1024), - Soft: uint64(1024), - }, - }, - }, - Hostname: "runc", - Mounts: []specs.Mount{ - { - Destination: "/proc", - Type: "proc", - Source: "proc", - Options: nil, - }, - { - Destination: "/dev", - Type: "tmpfs", - Source: "tmpfs", - Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, - }, - { - Destination: "/dev/pts", - Type: "devpts", - Source: "devpts", - Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, - }, - { - Destination: "/dev/shm", - Type: "tmpfs", - Source: "shm", - Options: []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"}, - }, - { - Destination: "/dev/mqueue", - Type: "mqueue", - Source: "mqueue", - Options: []string{"nosuid", "noexec", "nodev"}, - }, - { - Destination: "/sys", - Type: "sysfs", - Source: "sysfs", - Options: []string{"nosuid", "noexec", "nodev", "ro"}, - }, - { - Destination: "/sys/fs/cgroup", - Type: "cgroup", - Source: "cgroup", - Options: []string{"nosuid", "noexec", "nodev", "relatime", "ro"}, - }, - }, - Linux: specs.Linux{ - MaskedPaths: []string{ - "/proc/kcore", - "/proc/latency_stats", - "/proc/timer_stats", - "/proc/sched_debug", - }, - ReadonlyPaths: []string{ - "/proc/asound", - "/proc/bus", - "/proc/fs", - "/proc/irq", - "/proc/sys", - "/proc/sysrq-trigger", - }, - Resources: &specs.Resources{ - Devices: []specs.DeviceCgroup{ - { - Allow: false, - Access: sPtr("rwm"), - }, - }, - }, - Namespaces: []specs.Namespace{ - { - Type: "pid", - }, - { - Type: "network", - }, - { - Type: "ipc", - }, - { - Type: "uts", - }, - { - Type: "mount", - }, - }, - }, - } - - checkNoFile := func(name string) error { - _, err := os.Stat(name) - if err == nil { - return fmt.Errorf("File %s exists. Remove it first", name) - } - if !os.IsNotExist(err) { - return err - } - return nil - } - bundle := context.String("bundle") - if bundle != "" { - if err := os.Chdir(bundle); err != nil { - fatal(err) - } - } - if err := checkNoFile(specConfig); err != nil { - fatal(err) - } - data, err := json.MarshalIndent(&spec, "", "\t") - if err != nil { - fatal(err) - } - if err := ioutil.WriteFile(specConfig, data, 0666); err != nil { - fatal(err) - } - }, -} - -func sPtr(s string) *string { return &s } -func rPtr(r rune) *rune { return &r } -func iPtr(i int64) *int64 { return &i } -func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } -func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } - -// loadSpec loads the specification from the provided path. 
-// If the path is empty then the default path will be "config.json" -func loadSpec(cPath string) (spec *specs.Spec, err error) { - cf, err := os.Open(cPath) - if err != nil { - if os.IsNotExist(err) { - return nil, fmt.Errorf("JSON specification file %s not found", cPath) - } - return nil, err - } - defer cf.Close() - - if err = json.NewDecoder(cf).Decode(&spec); err != nil { - return nil, err - } - return spec, validateProcessSpec(&spec.Process) -} - -func createLibContainerRlimit(rlimit specs.Rlimit) (configs.Rlimit, error) { - rl, err := strToRlimit(rlimit.Type) - if err != nil { - return configs.Rlimit{}, err - } - return configs.Rlimit{ - Type: rl, - Hard: uint64(rlimit.Hard), - Soft: uint64(rlimit.Soft), - }, nil -} diff --git a/vendor/github.com/opencontainers/runc/start.go b/vendor/github.com/opencontainers/runc/start.go deleted file mode 100644 index dcdcb324..00000000 --- a/vendor/github.com/opencontainers/runc/start.go +++ /dev/null @@ -1,138 +0,0 @@ -// +build linux - -package main - -import ( - "os" - "runtime" - - "github.com/codegangsta/cli" - "github.com/coreos/go-systemd/activation" - "github.com/opencontainers/runc/libcontainer" - "github.com/opencontainers/runtime-spec/specs-go" -) - -// default action is to start a container -var startCommand = cli.Command{ - Name: "start", - Usage: "create and run a container", - ArgsUsage: ` - -Where "" is your name for the instance of the container that you -are starting. The name you provide for the container instance must be unique on -your host.`, - Description: `The start command creates an instance of a container for a bundle. The bundle -is a directory with a specification file named "` + specConfig + `" and a root -filesystem. - -The specification file includes an args parameter. The args parameter is used -to specify command(s) that get run when the container is started. To change the -command(s) that get executed on start, edit the args parameter of the spec. See -"runc spec --help" for more explanation.`, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "bundle, b", - Value: "", - Usage: `path to the root of the bundle directory, defaults to the current directory`, - }, - cli.StringFlag{ - Name: "console", - Value: "", - Usage: "specify the pty slave path for use with the container", - }, - cli.BoolFlag{ - Name: "detach,d", - Usage: "detach from the container's process", - }, - cli.StringFlag{ - Name: "pid-file", - Value: "", - Usage: "specify the file to write the process id to", - }, - cli.BoolFlag{ - Name: "no-subreaper", - Usage: "disable the use of the subreaper used to reap reparented processes", - }, - cli.BoolFlag{ - Name: "no-pivot", - Usage: "do not use pivot root to jail process inside rootfs. This should be used whenever the rootfs is on top of a ramdisk", - }, - }, - Action: func(context *cli.Context) { - bundle := context.String("bundle") - if bundle != "" { - if err := os.Chdir(bundle); err != nil { - fatal(err) - } - } - spec, err := loadSpec(specConfig) - if err != nil { - fatal(err) - } - - notifySocket := os.Getenv("NOTIFY_SOCKET") - if notifySocket != "" { - setupSdNotify(spec, notifySocket) - } - - if os.Geteuid() != 0 { - fatalf("runc should be run as root") - } - - status, err := startContainer(context, spec) - if err != nil { - fatal(err) - } - // exit with the container's exit status so any external supervisor is - // notified of the exit with the correct exit status. 
- os.Exit(status) - }, -} - -func init() { - if len(os.Args) > 1 && os.Args[1] == "init" { - runtime.GOMAXPROCS(1) - runtime.LockOSThread() - } -} - -var initCommand = cli.Command{ - Name: "init", - Usage: `initialize the namespaces and launch the process (do not call it outside of runc)`, - Action: func(context *cli.Context) { - factory, _ := libcontainer.New("") - if err := factory.StartInitialization(); err != nil { - // as the error is sent back to the parent there is no need to log - // or write it to stderr because the parent process will handle this - os.Exit(1) - } - panic("libcontainer: container init failed to exec") - }, -} - -func startContainer(context *cli.Context, spec *specs.Spec) (int, error) { - id := context.Args().First() - if id == "" { - return -1, errEmptyID - } - container, err := createContainer(context, id, spec) - if err != nil { - return -1, err - } - detach := context.Bool("detach") - // Support on-demand socket activation by passing file descriptors into the container init process. - listenFDs := []*os.File{} - if os.Getenv("LISTEN_FDS") != "" { - listenFDs = activation.Files(false) - } - r := &runner{ - enableSubreaper: !context.Bool("no-subreaper"), - shouldDestroy: true, - container: container, - listenFDs: listenFDs, - console: context.String("console"), - detach: detach, - pidFile: context.String("pid-file"), - } - return r.run(&spec.Process) -} diff --git a/vendor/github.com/opencontainers/runc/state.go b/vendor/github.com/opencontainers/runc/state.go deleted file mode 100644 index 50cb5d95..00000000 --- a/vendor/github.com/opencontainers/runc/state.go +++ /dev/null @@ -1,70 +0,0 @@ -// +build linux - -package main - -import ( - "encoding/json" - "os" - "time" - - "github.com/codegangsta/cli" - "github.com/opencontainers/runc/libcontainer/utils" -) - -// cState represents the platform agnostic pieces relating to a running -// container's status and state. Note: The fields in this structure adhere to -// the opencontainers/runtime-spec/specs-go requirement for json fields that must be returned -// in a state command. -type cState struct { - // Version is the OCI version for the container - Version string `json:"ociVersion"` - // ID is the container ID - ID string `json:"id"` - // InitProcessPid is the init process id in the parent namespace - InitProcessPid int `json:"pid"` - // Bundle is the path on the filesystem to the bundle - Bundle string `json:"bundlePath"` - // Rootfs is a path to a directory containing the container's root filesystem. - Rootfs string `json:"rootfsPath"` - // Status is the current status of the container, running, paused, ... 
- Status string `json:"status"` - // Created is the unix timestamp for the creation time of the container in UTC - Created time.Time `json:"created"` -} - -var stateCommand = cli.Command{ - Name: "state", - Usage: "output the state of a container", - ArgsUsage: ` - -Where "" is your name for the instance of the container.`, - Description: `The state command outputs current state information for the -instance of a container.`, - Action: func(context *cli.Context) { - container, err := getContainer(context) - if err != nil { - fatal(err) - } - containerStatus, err := container.Status() - if err != nil { - fatal(err) - } - state, err := container.State() - if err != nil { - fatal(err) - } - cs := cState{ - Version: state.BaseState.Config.Version, - ID: state.BaseState.ID, - InitProcessPid: state.BaseState.InitProcessPid, - Status: containerStatus.String(), - Bundle: utils.SearchLabels(state.Config.Labels, "bundle"), - Rootfs: state.BaseState.Config.Rootfs, - Created: state.BaseState.Created} - data, err := json.MarshalIndent(cs, "", " ") - if err != nil { - fatal(err) - } - os.Stdout.Write(data) - }, -} diff --git a/vendor/github.com/opencontainers/runc/tty.go b/vendor/github.com/opencontainers/runc/tty.go deleted file mode 100644 index 80c65515..00000000 --- a/vendor/github.com/opencontainers/runc/tty.go +++ /dev/null @@ -1,126 +0,0 @@ -// +build linux - -package main - -import ( - "fmt" - "io" - "os" - "sync" - - "github.com/docker/docker/pkg/term" - "github.com/opencontainers/runc/libcontainer" -) - -// setup standard pipes so that the TTY of the calling runc process -// is not inherited by the container. -func createStdioPipes(p *libcontainer.Process, rootuid int) (*tty, error) { - i, err := p.InitializeIO(rootuid) - if err != nil { - return nil, err - } - t := &tty{ - closers: []io.Closer{ - i.Stdin, - i.Stdout, - i.Stderr, - }, - } - // add the process's io to the post start closers if they support close - for _, cc := range []interface{}{ - p.Stdin, - p.Stdout, - p.Stderr, - } { - if c, ok := cc.(io.Closer); ok { - t.postStart = append(t.postStart, c) - } - } - go func() { - io.Copy(i.Stdin, os.Stdin) - i.Stdin.Close() - }() - t.wg.Add(2) - go t.copyIO(os.Stdout, i.Stdout) - go t.copyIO(os.Stderr, i.Stderr) - return t, nil -} - -func (t *tty) copyIO(w io.Writer, r io.ReadCloser) { - defer t.wg.Done() - io.Copy(w, r) - r.Close() -} - -func createTty(p *libcontainer.Process, rootuid int, consolePath string) (*tty, error) { - if consolePath != "" { - if err := p.ConsoleFromPath(consolePath); err != nil { - return nil, err - } - return &tty{}, nil - } - console, err := p.NewConsole(rootuid) - if err != nil { - return nil, err - } - go io.Copy(console, os.Stdin) - go io.Copy(os.Stdout, console) - - state, err := term.SetRawTerminal(os.Stdin.Fd()) - if err != nil { - return nil, fmt.Errorf("failed to set the terminal from the stdin: %v", err) - } - return &tty{ - console: console, - state: state, - closers: []io.Closer{ - console, - }, - }, nil -} - -type tty struct { - console libcontainer.Console - state *term.State - closers []io.Closer - postStart []io.Closer - wg sync.WaitGroup -} - -// ClosePostStart closes any fds that are provided to the container and dup2'd -// so that we no longer have copy in our process. 
-func (t *tty) ClosePostStart() error { - for _, c := range t.postStart { - c.Close() - } - return nil -} - -// Close closes all open fds for the tty and/or restores the orignal -// stdin state to what it was prior to the container execution -func (t *tty) Close() error { - // ensure that our side of the fds are always closed - for _, c := range t.postStart { - c.Close() - } - // wait for the copy routines to finish before closing the fds - t.wg.Wait() - for _, c := range t.closers { - c.Close() - } - if t.state != nil { - term.RestoreTerminal(os.Stdin.Fd(), t.state) - } - return nil -} - -func (t *tty) resize() error { - if t.console == nil { - return nil - } - ws, err := term.GetWinsize(os.Stdin.Fd()) - if err != nil { - return err - } - return term.SetWinsize(t.console.Fd(), ws) -} diff --git a/vendor/github.com/opencontainers/runc/utils.go b/vendor/github.com/opencontainers/runc/utils.go deleted file mode 100644 index e34d6558..00000000 --- a/vendor/github.com/opencontainers/runc/utils.go +++ /dev/null @@ -1,287 +0,0 @@ -// +build linux - -package main - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/codegangsta/cli" - "github.com/opencontainers/runc/libcontainer" - "github.com/opencontainers/runc/libcontainer/cgroups/systemd" - "github.com/opencontainers/runc/libcontainer/specconv" - "github.com/opencontainers/runtime-spec/specs-go" -) - -var errEmptyID = errors.New("container id cannot be empty") - -var container libcontainer.Container - -// loadFactory returns the configured factory instance for execing containers. -func loadFactory(context *cli.Context) (libcontainer.Factory, error) { - root := context.GlobalString("root") - abs, err := filepath.Abs(root) - if err != nil { - return nil, err - } - cgroupManager := libcontainer.Cgroupfs - if context.GlobalBool("systemd-cgroup") { - if systemd.UseSystemd() { - cgroupManager = libcontainer.SystemdCgroups - } else { - return nil, fmt.Errorf("systemd cgroup flag passed, but systemd support for managing cgroups is not available.") - } - } - return libcontainer.New(abs, cgroupManager, func(l *libcontainer.LinuxFactory) error { - l.CriuPath = context.GlobalString("criu") - return nil - }) -} - -// getContainer returns the specified container instance by loading it from state -// with the default factory. -func getContainer(context *cli.Context) (libcontainer.Container, error) { - id := context.Args().First() - if id == "" { - return nil, errEmptyID - } - factory, err := loadFactory(context) - if err != nil { - return nil, err - } - return factory.Load(id) -} - -// fatal prints the error's details if it is a libcontainer specific error type -// then exits the program with an exit status of 1. -func fatal(err error) { - // make sure the error is written to the logger - logrus.Error(err) - fmt.Fprintln(os.Stderr, err) - os.Exit(1) -} - -func fatalf(t string, v ...interface{}) { - fatal(fmt.Errorf(t, v...)) -} - -func getDefaultImagePath(context *cli.Context) string { - cwd, err := os.Getwd() - if err != nil { - panic(err) - } - return filepath.Join(cwd, "checkpoint") -} - -// newProcess returns a new libcontainer Process with the arguments from the -// spec and stdio from the current process. -func newProcess(p specs.Process) (*libcontainer.Process, error) { - lp := &libcontainer.Process{ - Args: p.Args, - Env: p.Env, - // TODO: fix libcontainer's API to better support uid/gid in a typesafe way. 
- User: fmt.Sprintf("%d:%d", p.User.UID, p.User.GID), - Cwd: p.Cwd, - Capabilities: p.Capabilities, - Label: p.SelinuxLabel, - NoNewPrivileges: &p.NoNewPrivileges, - AppArmorProfile: p.ApparmorProfile, - } - for _, rlimit := range p.Rlimits { - rl, err := createLibContainerRlimit(rlimit) - if err != nil { - return nil, err - } - lp.Rlimits = append(lp.Rlimits, rl) - } - return lp, nil -} - -func dupStdio(process *libcontainer.Process, rootuid int) error { - process.Stdin = os.Stdin - process.Stdout = os.Stdout - process.Stderr = os.Stderr - for _, fd := range []uintptr{ - os.Stdin.Fd(), - os.Stdout.Fd(), - os.Stderr.Fd(), - } { - if err := syscall.Fchown(int(fd), rootuid, rootuid); err != nil { - return err - } - } - return nil -} - -// If systemd is supporting sd_notify protocol, this function will add support -// for sd_notify protocol from within the container. -func setupSdNotify(spec *specs.Spec, notifySocket string) { - spec.Mounts = append(spec.Mounts, specs.Mount{Destination: notifySocket, Type: "bind", Source: notifySocket, Options: []string{"bind"}}) - spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("NOTIFY_SOCKET=%s", notifySocket)) -} - -func destroy(container libcontainer.Container) { - if err := container.Destroy(); err != nil { - logrus.Error(err) - } -} - -// setupIO sets the proper IO on the process depending on the configuration -// If there is a nil error then there must be a non nil tty returned -func setupIO(process *libcontainer.Process, rootuid int, console string, createTTY, detach bool) (*tty, error) { - // detach and createTty will not work unless a console path is passed - // so error out here before changing any terminal settings - if createTTY && detach && console == "" { - return nil, fmt.Errorf("cannot allocate tty if runc will detach") - } - if createTTY { - return createTty(process, rootuid, console) - } - if detach { - if err := dupStdio(process, rootuid); err != nil { - return nil, err - } - return &tty{}, nil - } - return createStdioPipes(process, rootuid) -} - -// createPidFile creates a file with the processes pid inside it atomically -// it creates a temp file with the paths filename + '.' 
infront of it -// then renames the file -func createPidFile(path string, process *libcontainer.Process) error { - pid, err := process.Pid() - if err != nil { - return err - } - var ( - tmpDir = filepath.Dir(path) - tmpName = filepath.Join(tmpDir, fmt.Sprintf(".%s", filepath.Base(path))) - ) - f, err := os.OpenFile(tmpName, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, 0666) - if err != nil { - return err - } - _, err = fmt.Fprintf(f, "%d", pid) - f.Close() - if err != nil { - return err - } - return os.Rename(tmpName, path) -} - -func createContainer(context *cli.Context, id string, spec *specs.Spec) (libcontainer.Container, error) { - config, err := specconv.CreateLibcontainerConfig(&specconv.CreateOpts{ - CgroupName: id, - UseSystemdCgroup: context.GlobalBool("systemd-cgroup"), - NoPivotRoot: context.Bool("no-pivot"), - Spec: spec, - }) - if err != nil { - return nil, err - } - - if _, err := os.Stat(config.Rootfs); err != nil { - if os.IsNotExist(err) { - return nil, fmt.Errorf("rootfs (%q) does not exist", config.Rootfs) - } - return nil, err - } - - factory, err := loadFactory(context) - if err != nil { - return nil, err - } - return factory.Create(id, config) -} - -type runner struct { - enableSubreaper bool - shouldDestroy bool - detach bool - listenFDs []*os.File - pidFile string - console string - container libcontainer.Container -} - -func (r *runner) run(config *specs.Process) (int, error) { - process, err := newProcess(*config) - if err != nil { - r.destroy() - return -1, err - } - if len(r.listenFDs) > 0 { - process.Env = append(process.Env, fmt.Sprintf("LISTEN_FDS=%d", len(r.listenFDs)), "LISTEN_PID=1") - process.ExtraFiles = append(process.ExtraFiles, r.listenFDs...) - } - rootuid, err := r.container.Config().HostUID() - if err != nil { - r.destroy() - return -1, err - } - tty, err := setupIO(process, rootuid, r.console, config.Terminal, r.detach) - if err != nil { - r.destroy() - return -1, err - } - handler := newSignalHandler(tty, r.enableSubreaper) - if err := r.container.Start(process); err != nil { - r.destroy() - tty.Close() - return -1, err - } - if err := tty.ClosePostStart(); err != nil { - r.terminate(process) - r.destroy() - tty.Close() - return -1, err - } - if r.pidFile != "" { - if err := createPidFile(r.pidFile, process); err != nil { - r.terminate(process) - r.destroy() - tty.Close() - return -1, err - } - } - if r.detach { - tty.Close() - return 0, nil - } - status, err := handler.forward(process) - if err != nil { - r.terminate(process) - } - r.destroy() - tty.Close() - return status, err -} - -func (r *runner) destroy() { - if r.shouldDestroy { - destroy(r.container) - } -} - -func (r *runner) terminate(p *libcontainer.Process) { - p.Signal(syscall.SIGKILL) - p.Wait() -} - -func validateProcessSpec(spec *specs.Process) error { - if spec.Cwd == "" { - return fmt.Errorf("Cwd property must not be empty") - } - if !filepath.IsAbs(spec.Cwd) { - return fmt.Errorf("Cwd must be an absolute path") - } - if len(spec.Args) == 0 { - return fmt.Errorf("args must not be empty") - } - return nil -} diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore deleted file mode 100644 index daf913b1..00000000 --- a/vendor/github.com/pkg/errors/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - 
-_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml deleted file mode 100644 index 588ceca1..00000000 --- a/vendor/github.com/pkg/errors/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go -go_import_path: github.com/pkg/errors -go: - - 1.4.3 - - 1.5.4 - - 1.6.2 - - 1.7.1 - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/pkg/errors/BUILD.bazel b/vendor/github.com/pkg/errors/BUILD.bazel deleted file mode 100644 index c9b58edf..00000000 --- a/vendor/github.com/pkg/errors/BUILD.bazel +++ /dev/null @@ -1,30 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "errors.go", - "stack.go", - ], - importpath = "github.com/pkg/errors", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = [ - "bench_test.go", - "errors_test.go", - "format_test.go", - "stack_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/pkg/errors", -) - -go_test( - name = "go_default_xtest", - srcs = ["example_test.go"], - importpath = "github.com/pkg/errors_test", - deps = [":go_default_library"], -) diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md deleted file mode 100644 index 273db3c9..00000000 --- a/vendor/github.com/pkg/errors/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) - -Package errors provides simple error handling primitives. - -`go get github.com/pkg/errors` - -The traditional error handling idiom in Go is roughly akin to -```go -if err != nil { - return err -} -``` -which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. - -## Adding context to an error - -The errors.Wrap function returns a new error that adds context to the original error. For example -```go -_, err := ioutil.ReadAll(r) -if err != nil { - return errors.Wrap(err, "read failed") -} -``` -## Retrieving the cause of an error - -Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. -```go -type causer interface { - Cause() error -} -``` -`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: -```go -switch err := errors.Cause(err).(type) { -case *MyError: - // handle specifically -default: - // unknown error -} -``` - -[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). - -## Contributing - -We welcome pull requests, bug fixes and issue reports. 
With that said, the bar for adding new symbols to this package is intentionally set high. - -Before proposing a change, please discuss your change by raising an issue. - -## Licence - -BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml deleted file mode 100644 index a932eade..00000000 --- a/vendor/github.com/pkg/errors/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\pkg\errors -shallow_clone: true # for startup speed - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -# http://www.appveyor.com/docs/installed-software -install: - # some helpful output for debugging builds - - go version - - go env - # pre-installed MinGW at C:\MinGW is 32bit only - # but MSYS2 at C:\msys64 has mingw64 - - set PATH=C:\msys64\mingw64\bin;%PATH% - - gcc --version - - g++ --version - -build_script: - - go install -v ./... - -test_script: - - set PATH=C:\gopath\bin;%PATH% - - go test -v ./... - -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off diff --git a/vendor/github.com/pkg/errors/bench_test.go b/vendor/github.com/pkg/errors/bench_test.go deleted file mode 100644 index 0416a3cb..00000000 --- a/vendor/github.com/pkg/errors/bench_test.go +++ /dev/null @@ -1,59 +0,0 @@ -// +build go1.7 - -package errors - -import ( - "fmt" - "testing" - - stderrors "errors" -) - -func noErrors(at, depth int) error { - if at >= depth { - return stderrors.New("no error") - } - return noErrors(at+1, depth) -} -func yesErrors(at, depth int) error { - if at >= depth { - return New("ye error") - } - return yesErrors(at+1, depth) -} - -func BenchmarkErrors(b *testing.B) { - var toperr error - type run struct { - stack int - std bool - } - runs := []run{ - {10, false}, - {10, true}, - {100, false}, - {100, true}, - {1000, false}, - {1000, true}, - } - for _, r := range runs { - part := "pkg/errors" - if r.std { - part = "errors" - } - name := fmt.Sprintf("%s-stack-%d", part, r.stack) - b.Run(name, func(b *testing.B) { - var err error - f := yesErrors - if r.std { - f = noErrors - } - b.ReportAllocs() - for i := 0; i < b.N; i++ { - err = f(0, r.stack) - } - b.StopTimer() - toperr = err - }) - } -} diff --git a/vendor/github.com/pkg/errors/errors_test.go b/vendor/github.com/pkg/errors/errors_test.go deleted file mode 100644 index 1d8c6355..00000000 --- a/vendor/github.com/pkg/errors/errors_test.go +++ /dev/null @@ -1,226 +0,0 @@ -package errors - -import ( - "errors" - "fmt" - "io" - "reflect" - "testing" -) - -func TestNew(t *testing.T) { - tests := []struct { - err string - want error - }{ - {"", fmt.Errorf("")}, - {"foo", fmt.Errorf("foo")}, - {"foo", New("foo")}, - {"string with format specifiers: %v", errors.New("string with format specifiers: %v")}, - } - - for _, tt := range tests { - got := New(tt.err) - if got.Error() != tt.want.Error() { - t.Errorf("New.Error(): got: %q, want %q", got, tt.want) - } - } -} - -func TestWrapNil(t *testing.T) { - got := Wrap(nil, "no error") - if got != nil { - t.Errorf("Wrap(nil, \"no error\"): got %#v, expected nil", got) - } -} - -func TestWrap(t *testing.T) { - tests := []struct { - err error - message string - want string - }{ - {io.EOF, "read error", "read error: EOF"}, - {Wrap(io.EOF, "read error"), "client error", "client error: read error: EOF"}, - } - - for _, tt := range tests { - got := Wrap(tt.err, tt.message).Error() - if got != tt.want { - t.Errorf("Wrap(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want) - } - } -} - -type 
nilError struct{} - -func (nilError) Error() string { return "nil error" } - -func TestCause(t *testing.T) { - x := New("error") - tests := []struct { - err error - want error - }{{ - // nil error is nil - err: nil, - want: nil, - }, { - // explicit nil error is nil - err: (error)(nil), - want: nil, - }, { - // typed nil is nil - err: (*nilError)(nil), - want: (*nilError)(nil), - }, { - // uncaused error is unaffected - err: io.EOF, - want: io.EOF, - }, { - // caused error returns cause - err: Wrap(io.EOF, "ignored"), - want: io.EOF, - }, { - err: x, // return from errors.New - want: x, - }, { - WithMessage(nil, "whoops"), - nil, - }, { - WithMessage(io.EOF, "whoops"), - io.EOF, - }, { - WithStack(nil), - nil, - }, { - WithStack(io.EOF), - io.EOF, - }} - - for i, tt := range tests { - got := Cause(tt.err) - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("test %d: got %#v, want %#v", i+1, got, tt.want) - } - } -} - -func TestWrapfNil(t *testing.T) { - got := Wrapf(nil, "no error") - if got != nil { - t.Errorf("Wrapf(nil, \"no error\"): got %#v, expected nil", got) - } -} - -func TestWrapf(t *testing.T) { - tests := []struct { - err error - message string - want string - }{ - {io.EOF, "read error", "read error: EOF"}, - {Wrapf(io.EOF, "read error without format specifiers"), "client error", "client error: read error without format specifiers: EOF"}, - {Wrapf(io.EOF, "read error with %d format specifier", 1), "client error", "client error: read error with 1 format specifier: EOF"}, - } - - for _, tt := range tests { - got := Wrapf(tt.err, tt.message).Error() - if got != tt.want { - t.Errorf("Wrapf(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want) - } - } -} - -func TestErrorf(t *testing.T) { - tests := []struct { - err error - want string - }{ - {Errorf("read error without format specifiers"), "read error without format specifiers"}, - {Errorf("read error with %d format specifier", 1), "read error with 1 format specifier"}, - } - - for _, tt := range tests { - got := tt.err.Error() - if got != tt.want { - t.Errorf("Errorf(%v): got: %q, want %q", tt.err, got, tt.want) - } - } -} - -func TestWithStackNil(t *testing.T) { - got := WithStack(nil) - if got != nil { - t.Errorf("WithStack(nil): got %#v, expected nil", got) - } -} - -func TestWithStack(t *testing.T) { - tests := []struct { - err error - want string - }{ - {io.EOF, "EOF"}, - {WithStack(io.EOF), "EOF"}, - } - - for _, tt := range tests { - got := WithStack(tt.err).Error() - if got != tt.want { - t.Errorf("WithStack(%v): got: %v, want %v", tt.err, got, tt.want) - } - } -} - -func TestWithMessageNil(t *testing.T) { - got := WithMessage(nil, "no error") - if got != nil { - t.Errorf("WithMessage(nil, \"no error\"): got %#v, expected nil", got) - } -} - -func TestWithMessage(t *testing.T) { - tests := []struct { - err error - message string - want string - }{ - {io.EOF, "read error", "read error: EOF"}, - {WithMessage(io.EOF, "read error"), "client error", "client error: read error: EOF"}, - } - - for _, tt := range tests { - got := WithMessage(tt.err, tt.message).Error() - if got != tt.want { - t.Errorf("WithMessage(%v, %q): got: %q, want %q", tt.err, tt.message, got, tt.want) - } - } - -} - -// errors.New, etc values are not expected to be compared by value -// but the change in errors#27 made them incomparable. Assert that -// various kinds of errors have a functional equality operator, even -// if the result of that equality is always false. 
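For reference, the behaviour the removed pkg/errors tests and README exercise (Wrap annotating an error with a message and a stack, Cause unwinding back to the root error) can be sketched with the package's public API; readConfig here is a hypothetical helper and the inline output comments are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// readConfig is a hypothetical helper that fails with an annotated error.
func readConfig() error {
	root := errors.New("file not found")
	return errors.Wrap(root, "read config")
}

func main() {
	err := errors.Wrap(readConfig(), "startup")
	fmt.Println(err)               // startup: read config: file not found
	fmt.Println(errors.Cause(err)) // file not found
	fmt.Printf("%+v\n", err)       // message chain plus the recorded stack traces
}
```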
-func TestErrorEquality(t *testing.T) { - vals := []error{ - nil, - io.EOF, - errors.New("EOF"), - New("EOF"), - Errorf("EOF"), - Wrap(io.EOF, "EOF"), - Wrapf(io.EOF, "EOF%d", 2), - WithMessage(nil, "whoops"), - WithMessage(io.EOF, "whoops"), - WithStack(io.EOF), - WithStack(nil), - } - - for i := range vals { - for j := range vals { - _ = vals[i] == vals[j] // mustn't panic - } - } -} diff --git a/vendor/github.com/pkg/errors/example_test.go b/vendor/github.com/pkg/errors/example_test.go deleted file mode 100644 index c1fc13e3..00000000 --- a/vendor/github.com/pkg/errors/example_test.go +++ /dev/null @@ -1,205 +0,0 @@ -package errors_test - -import ( - "fmt" - - "github.com/pkg/errors" -) - -func ExampleNew() { - err := errors.New("whoops") - fmt.Println(err) - - // Output: whoops -} - -func ExampleNew_printf() { - err := errors.New("whoops") - fmt.Printf("%+v", err) - - // Example output: - // whoops - // github.com/pkg/errors_test.ExampleNew_printf - // /home/dfc/src/github.com/pkg/errors/example_test.go:17 - // testing.runExample - // /home/dfc/go/src/testing/example.go:114 - // testing.RunExamples - // /home/dfc/go/src/testing/example.go:38 - // testing.(*M).Run - // /home/dfc/go/src/testing/testing.go:744 - // main.main - // /github.com/pkg/errors/_test/_testmain.go:106 - // runtime.main - // /home/dfc/go/src/runtime/proc.go:183 - // runtime.goexit - // /home/dfc/go/src/runtime/asm_amd64.s:2059 -} - -func ExampleWithMessage() { - cause := errors.New("whoops") - err := errors.WithMessage(cause, "oh noes") - fmt.Println(err) - - // Output: oh noes: whoops -} - -func ExampleWithStack() { - cause := errors.New("whoops") - err := errors.WithStack(cause) - fmt.Println(err) - - // Output: whoops -} - -func ExampleWithStack_printf() { - cause := errors.New("whoops") - err := errors.WithStack(cause) - fmt.Printf("%+v", err) - - // Example Output: - // whoops - // github.com/pkg/errors_test.ExampleWithStack_printf - // /home/fabstu/go/src/github.com/pkg/errors/example_test.go:55 - // testing.runExample - // /usr/lib/go/src/testing/example.go:114 - // testing.RunExamples - // /usr/lib/go/src/testing/example.go:38 - // testing.(*M).Run - // /usr/lib/go/src/testing/testing.go:744 - // main.main - // github.com/pkg/errors/_test/_testmain.go:106 - // runtime.main - // /usr/lib/go/src/runtime/proc.go:183 - // runtime.goexit - // /usr/lib/go/src/runtime/asm_amd64.s:2086 - // github.com/pkg/errors_test.ExampleWithStack_printf - // /home/fabstu/go/src/github.com/pkg/errors/example_test.go:56 - // testing.runExample - // /usr/lib/go/src/testing/example.go:114 - // testing.RunExamples - // /usr/lib/go/src/testing/example.go:38 - // testing.(*M).Run - // /usr/lib/go/src/testing/testing.go:744 - // main.main - // github.com/pkg/errors/_test/_testmain.go:106 - // runtime.main - // /usr/lib/go/src/runtime/proc.go:183 - // runtime.goexit - // /usr/lib/go/src/runtime/asm_amd64.s:2086 -} - -func ExampleWrap() { - cause := errors.New("whoops") - err := errors.Wrap(cause, "oh noes") - fmt.Println(err) - - // Output: oh noes: whoops -} - -func fn() error { - e1 := errors.New("error") - e2 := errors.Wrap(e1, "inner") - e3 := errors.Wrap(e2, "middle") - return errors.Wrap(e3, "outer") -} - -func ExampleCause() { - err := fn() - fmt.Println(err) - fmt.Println(errors.Cause(err)) - - // Output: outer: middle: inner: error - // error -} - -func ExampleWrap_extended() { - err := fn() - fmt.Printf("%+v\n", err) - - // Example output: - // error - // github.com/pkg/errors_test.fn - // 
/home/dfc/src/github.com/pkg/errors/example_test.go:47 - // github.com/pkg/errors_test.ExampleCause_printf - // /home/dfc/src/github.com/pkg/errors/example_test.go:63 - // testing.runExample - // /home/dfc/go/src/testing/example.go:114 - // testing.RunExamples - // /home/dfc/go/src/testing/example.go:38 - // testing.(*M).Run - // /home/dfc/go/src/testing/testing.go:744 - // main.main - // /github.com/pkg/errors/_test/_testmain.go:104 - // runtime.main - // /home/dfc/go/src/runtime/proc.go:183 - // runtime.goexit - // /home/dfc/go/src/runtime/asm_amd64.s:2059 - // github.com/pkg/errors_test.fn - // /home/dfc/src/github.com/pkg/errors/example_test.go:48: inner - // github.com/pkg/errors_test.fn - // /home/dfc/src/github.com/pkg/errors/example_test.go:49: middle - // github.com/pkg/errors_test.fn - // /home/dfc/src/github.com/pkg/errors/example_test.go:50: outer -} - -func ExampleWrapf() { - cause := errors.New("whoops") - err := errors.Wrapf(cause, "oh noes #%d", 2) - fmt.Println(err) - - // Output: oh noes #2: whoops -} - -func ExampleErrorf_extended() { - err := errors.Errorf("whoops: %s", "foo") - fmt.Printf("%+v", err) - - // Example output: - // whoops: foo - // github.com/pkg/errors_test.ExampleErrorf - // /home/dfc/src/github.com/pkg/errors/example_test.go:101 - // testing.runExample - // /home/dfc/go/src/testing/example.go:114 - // testing.RunExamples - // /home/dfc/go/src/testing/example.go:38 - // testing.(*M).Run - // /home/dfc/go/src/testing/testing.go:744 - // main.main - // /github.com/pkg/errors/_test/_testmain.go:102 - // runtime.main - // /home/dfc/go/src/runtime/proc.go:183 - // runtime.goexit - // /home/dfc/go/src/runtime/asm_amd64.s:2059 -} - -func Example_stackTrace() { - type stackTracer interface { - StackTrace() errors.StackTrace - } - - err, ok := errors.Cause(fn()).(stackTracer) - if !ok { - panic("oops, err does not implement stackTracer") - } - - st := err.StackTrace() - fmt.Printf("%+v", st[0:2]) // top two frames - - // Example output: - // github.com/pkg/errors_test.fn - // /home/dfc/src/github.com/pkg/errors/example_test.go:47 - // github.com/pkg/errors_test.Example_stackTrace - // /home/dfc/src/github.com/pkg/errors/example_test.go:127 -} - -func ExampleCause_printf() { - err := errors.Wrap(func() error { - return func() error { - return errors.Errorf("hello %s", fmt.Sprintf("world")) - }() - }(), "failed") - - fmt.Printf("%v", err) - - // Output: failed: hello world -} diff --git a/vendor/github.com/pkg/errors/format_test.go b/vendor/github.com/pkg/errors/format_test.go deleted file mode 100644 index 15fd7d89..00000000 --- a/vendor/github.com/pkg/errors/format_test.go +++ /dev/null @@ -1,535 +0,0 @@ -package errors - -import ( - "errors" - "fmt" - "io" - "regexp" - "strings" - "testing" -) - -func TestFormatNew(t *testing.T) { - tests := []struct { - error - format string - want string - }{{ - New("error"), - "%s", - "error", - }, { - New("error"), - "%v", - "error", - }, { - New("error"), - "%+v", - "error\n" + - "github.com/pkg/errors.TestFormatNew\n" + - "\t.+/github.com/pkg/errors/format_test.go:26", - }, { - New("error"), - "%q", - `"error"`, - }} - - for i, tt := range tests { - testFormatRegexp(t, i, tt.error, tt.format, tt.want) - } -} - -func TestFormatErrorf(t *testing.T) { - tests := []struct { - error - format string - want string - }{{ - Errorf("%s", "error"), - "%s", - "error", - }, { - Errorf("%s", "error"), - "%v", - "error", - }, { - Errorf("%s", "error"), - "%+v", - "error\n" + - "github.com/pkg/errors.TestFormatErrorf\n" + - 
"\t.+/github.com/pkg/errors/format_test.go:56", - }} - - for i, tt := range tests { - testFormatRegexp(t, i, tt.error, tt.format, tt.want) - } -} - -func TestFormatWrap(t *testing.T) { - tests := []struct { - error - format string - want string - }{{ - Wrap(New("error"), "error2"), - "%s", - "error2: error", - }, { - Wrap(New("error"), "error2"), - "%v", - "error2: error", - }, { - Wrap(New("error"), "error2"), - "%+v", - "error\n" + - "github.com/pkg/errors.TestFormatWrap\n" + - "\t.+/github.com/pkg/errors/format_test.go:82", - }, { - Wrap(io.EOF, "error"), - "%s", - "error: EOF", - }, { - Wrap(io.EOF, "error"), - "%v", - "error: EOF", - }, { - Wrap(io.EOF, "error"), - "%+v", - "EOF\n" + - "error\n" + - "github.com/pkg/errors.TestFormatWrap\n" + - "\t.+/github.com/pkg/errors/format_test.go:96", - }, { - Wrap(Wrap(io.EOF, "error1"), "error2"), - "%+v", - "EOF\n" + - "error1\n" + - "github.com/pkg/errors.TestFormatWrap\n" + - "\t.+/github.com/pkg/errors/format_test.go:103\n", - }, { - Wrap(New("error with space"), "context"), - "%q", - `"context: error with space"`, - }} - - for i, tt := range tests { - testFormatRegexp(t, i, tt.error, tt.format, tt.want) - } -} - -func TestFormatWrapf(t *testing.T) { - tests := []struct { - error - format string - want string - }{{ - Wrapf(io.EOF, "error%d", 2), - "%s", - "error2: EOF", - }, { - Wrapf(io.EOF, "error%d", 2), - "%v", - "error2: EOF", - }, { - Wrapf(io.EOF, "error%d", 2), - "%+v", - "EOF\n" + - "error2\n" + - "github.com/pkg/errors.TestFormatWrapf\n" + - "\t.+/github.com/pkg/errors/format_test.go:134", - }, { - Wrapf(New("error"), "error%d", 2), - "%s", - "error2: error", - }, { - Wrapf(New("error"), "error%d", 2), - "%v", - "error2: error", - }, { - Wrapf(New("error"), "error%d", 2), - "%+v", - "error\n" + - "github.com/pkg/errors.TestFormatWrapf\n" + - "\t.+/github.com/pkg/errors/format_test.go:149", - }} - - for i, tt := range tests { - testFormatRegexp(t, i, tt.error, tt.format, tt.want) - } -} - -func TestFormatWithStack(t *testing.T) { - tests := []struct { - error - format string - want []string - }{{ - WithStack(io.EOF), - "%s", - []string{"EOF"}, - }, { - WithStack(io.EOF), - "%v", - []string{"EOF"}, - }, { - WithStack(io.EOF), - "%+v", - []string{"EOF", - "github.com/pkg/errors.TestFormatWithStack\n" + - "\t.+/github.com/pkg/errors/format_test.go:175"}, - }, { - WithStack(New("error")), - "%s", - []string{"error"}, - }, { - WithStack(New("error")), - "%v", - []string{"error"}, - }, { - WithStack(New("error")), - "%+v", - []string{"error", - "github.com/pkg/errors.TestFormatWithStack\n" + - "\t.+/github.com/pkg/errors/format_test.go:189", - "github.com/pkg/errors.TestFormatWithStack\n" + - "\t.+/github.com/pkg/errors/format_test.go:189"}, - }, { - WithStack(WithStack(io.EOF)), - "%+v", - []string{"EOF", - "github.com/pkg/errors.TestFormatWithStack\n" + - "\t.+/github.com/pkg/errors/format_test.go:197", - "github.com/pkg/errors.TestFormatWithStack\n" + - "\t.+/github.com/pkg/errors/format_test.go:197"}, - }, { - WithStack(WithStack(Wrapf(io.EOF, "message"))), - "%+v", - []string{"EOF", - "message", - "github.com/pkg/errors.TestFormatWithStack\n" + - "\t.+/github.com/pkg/errors/format_test.go:205", - "github.com/pkg/errors.TestFormatWithStack\n" + - "\t.+/github.com/pkg/errors/format_test.go:205", - "github.com/pkg/errors.TestFormatWithStack\n" + - "\t.+/github.com/pkg/errors/format_test.go:205"}, - }, { - WithStack(Errorf("error%d", 1)), - "%+v", - []string{"error1", - "github.com/pkg/errors.TestFormatWithStack\n" + - 
"\t.+/github.com/pkg/errors/format_test.go:216", - "github.com/pkg/errors.TestFormatWithStack\n" + - "\t.+/github.com/pkg/errors/format_test.go:216"}, - }} - - for i, tt := range tests { - testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true) - } -} - -func TestFormatWithMessage(t *testing.T) { - tests := []struct { - error - format string - want []string - }{{ - WithMessage(New("error"), "error2"), - "%s", - []string{"error2: error"}, - }, { - WithMessage(New("error"), "error2"), - "%v", - []string{"error2: error"}, - }, { - WithMessage(New("error"), "error2"), - "%+v", - []string{ - "error", - "github.com/pkg/errors.TestFormatWithMessage\n" + - "\t.+/github.com/pkg/errors/format_test.go:244", - "error2"}, - }, { - WithMessage(io.EOF, "addition1"), - "%s", - []string{"addition1: EOF"}, - }, { - WithMessage(io.EOF, "addition1"), - "%v", - []string{"addition1: EOF"}, - }, { - WithMessage(io.EOF, "addition1"), - "%+v", - []string{"EOF", "addition1"}, - }, { - WithMessage(WithMessage(io.EOF, "addition1"), "addition2"), - "%v", - []string{"addition2: addition1: EOF"}, - }, { - WithMessage(WithMessage(io.EOF, "addition1"), "addition2"), - "%+v", - []string{"EOF", "addition1", "addition2"}, - }, { - Wrap(WithMessage(io.EOF, "error1"), "error2"), - "%+v", - []string{"EOF", "error1", "error2", - "github.com/pkg/errors.TestFormatWithMessage\n" + - "\t.+/github.com/pkg/errors/format_test.go:272"}, - }, { - WithMessage(Errorf("error%d", 1), "error2"), - "%+v", - []string{"error1", - "github.com/pkg/errors.TestFormatWithMessage\n" + - "\t.+/github.com/pkg/errors/format_test.go:278", - "error2"}, - }, { - WithMessage(WithStack(io.EOF), "error"), - "%+v", - []string{ - "EOF", - "github.com/pkg/errors.TestFormatWithMessage\n" + - "\t.+/github.com/pkg/errors/format_test.go:285", - "error"}, - }, { - WithMessage(Wrap(WithStack(io.EOF), "inside-error"), "outside-error"), - "%+v", - []string{ - "EOF", - "github.com/pkg/errors.TestFormatWithMessage\n" + - "\t.+/github.com/pkg/errors/format_test.go:293", - "inside-error", - "github.com/pkg/errors.TestFormatWithMessage\n" + - "\t.+/github.com/pkg/errors/format_test.go:293", - "outside-error"}, - }} - - for i, tt := range tests { - testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true) - } -} - -func TestFormatGeneric(t *testing.T) { - starts := []struct { - err error - want []string - }{ - {New("new-error"), []string{ - "new-error", - "github.com/pkg/errors.TestFormatGeneric\n" + - "\t.+/github.com/pkg/errors/format_test.go:315"}, - }, {Errorf("errorf-error"), []string{ - "errorf-error", - "github.com/pkg/errors.TestFormatGeneric\n" + - "\t.+/github.com/pkg/errors/format_test.go:319"}, - }, {errors.New("errors-new-error"), []string{ - "errors-new-error"}, - }, - } - - wrappers := []wrapper{ - { - func(err error) error { return WithMessage(err, "with-message") }, - []string{"with-message"}, - }, { - func(err error) error { return WithStack(err) }, - []string{ - "github.com/pkg/errors.(func·002|TestFormatGeneric.func2)\n\t" + - ".+/github.com/pkg/errors/format_test.go:333", - }, - }, { - func(err error) error { return Wrap(err, "wrap-error") }, - []string{ - "wrap-error", - "github.com/pkg/errors.(func·003|TestFormatGeneric.func3)\n\t" + - ".+/github.com/pkg/errors/format_test.go:339", - }, - }, { - func(err error) error { return Wrapf(err, "wrapf-error%d", 1) }, - []string{ - "wrapf-error1", - "github.com/pkg/errors.(func·004|TestFormatGeneric.func4)\n\t" + - ".+/github.com/pkg/errors/format_test.go:346", - }, - }, - } - - for s := 
range starts { - err := starts[s].err - want := starts[s].want - testFormatCompleteCompare(t, s, err, "%+v", want, false) - testGenericRecursive(t, err, want, wrappers, 3) - } -} - -func testFormatRegexp(t *testing.T, n int, arg interface{}, format, want string) { - got := fmt.Sprintf(format, arg) - gotLines := strings.SplitN(got, "\n", -1) - wantLines := strings.SplitN(want, "\n", -1) - - if len(wantLines) > len(gotLines) { - t.Errorf("test %d: wantLines(%d) > gotLines(%d):\n got: %q\nwant: %q", n+1, len(wantLines), len(gotLines), got, want) - return - } - - for i, w := range wantLines { - match, err := regexp.MatchString(w, gotLines[i]) - if err != nil { - t.Fatal(err) - } - if !match { - t.Errorf("test %d: line %d: fmt.Sprintf(%q, err):\n got: %q\nwant: %q", n+1, i+1, format, got, want) - } - } -} - -var stackLineR = regexp.MustCompile(`\.`) - -// parseBlocks parses input into a slice, where: -// - incase entry contains a newline, its a stacktrace -// - incase entry contains no newline, its a solo line. -// -// Detecting stack boundaries only works incase the WithStack-calls are -// to be found on the same line, thats why it is optionally here. -// -// Example use: -// -// for _, e := range blocks { -// if strings.ContainsAny(e, "\n") { -// // Match as stack -// } else { -// // Match as line -// } -// } -// -func parseBlocks(input string, detectStackboundaries bool) ([]string, error) { - var blocks []string - - stack := "" - wasStack := false - lines := map[string]bool{} // already found lines - - for _, l := range strings.Split(input, "\n") { - isStackLine := stackLineR.MatchString(l) - - switch { - case !isStackLine && wasStack: - blocks = append(blocks, stack, l) - stack = "" - lines = map[string]bool{} - case isStackLine: - if wasStack { - // Detecting two stacks after another, possible cause lines match in - // our tests due to WithStack(WithStack(io.EOF)) on same line. 
- if detectStackboundaries { - if lines[l] { - if len(stack) == 0 { - return nil, errors.New("len of block must not be zero here") - } - - blocks = append(blocks, stack) - stack = l - lines = map[string]bool{l: true} - continue - } - } - - stack = stack + "\n" + l - } else { - stack = l - } - lines[l] = true - case !isStackLine && !wasStack: - blocks = append(blocks, l) - default: - return nil, errors.New("must not happen") - } - - wasStack = isStackLine - } - - // Use up stack - if stack != "" { - blocks = append(blocks, stack) - } - return blocks, nil -} - -func testFormatCompleteCompare(t *testing.T, n int, arg interface{}, format string, want []string, detectStackBoundaries bool) { - gotStr := fmt.Sprintf(format, arg) - - got, err := parseBlocks(gotStr, detectStackBoundaries) - if err != nil { - t.Fatal(err) - } - - if len(got) != len(want) { - t.Fatalf("test %d: fmt.Sprintf(%s, err) -> wrong number of blocks: got(%d) want(%d)\n got: %s\nwant: %s\ngotStr: %q", - n+1, format, len(got), len(want), prettyBlocks(got), prettyBlocks(want), gotStr) - } - - for i := range got { - if strings.ContainsAny(want[i], "\n") { - // Match as stack - match, err := regexp.MatchString(want[i], got[i]) - if err != nil { - t.Fatal(err) - } - if !match { - t.Fatalf("test %d: block %d: fmt.Sprintf(%q, err):\ngot:\n%q\nwant:\n%q\nall-got:\n%s\nall-want:\n%s\n", - n+1, i+1, format, got[i], want[i], prettyBlocks(got), prettyBlocks(want)) - } - } else { - // Match as message - if got[i] != want[i] { - t.Fatalf("test %d: fmt.Sprintf(%s, err) at block %d got != want:\n got: %q\nwant: %q", n+1, format, i+1, got[i], want[i]) - } - } - } -} - -type wrapper struct { - wrap func(err error) error - want []string -} - -func prettyBlocks(blocks []string, prefix ...string) string { - var out []string - - for _, b := range blocks { - out = append(out, fmt.Sprintf("%v", b)) - } - - return " " + strings.Join(out, "\n ") -} - -func testGenericRecursive(t *testing.T, beforeErr error, beforeWant []string, list []wrapper, maxDepth int) { - if len(beforeWant) == 0 { - panic("beforeWant must not be empty") - } - for _, w := range list { - if len(w.want) == 0 { - panic("want must not be empty") - } - - err := w.wrap(beforeErr) - - // Copy required cause append(beforeWant, ..) modified beforeWant subtly. - beforeCopy := make([]string, len(beforeWant)) - copy(beforeCopy, beforeWant) - - beforeWant := beforeCopy - last := len(beforeWant) - 1 - var want []string - - // Merge two stacks behind each other. - if strings.ContainsAny(beforeWant[last], "\n") && strings.ContainsAny(w.want[0], "\n") { - want = append(beforeWant[:last], append([]string{beforeWant[last] + "((?s).*)" + w.want[0]}, w.want[1:]...)...) - } else { - want = append(beforeWant, w.want...) 
- } - - testFormatCompleteCompare(t, maxDepth, err, "%+v", want, false) - if maxDepth > 0 { - testGenericRecursive(t, err, want, list, maxDepth-1) - } - } -} diff --git a/vendor/github.com/pkg/errors/stack_test.go b/vendor/github.com/pkg/errors/stack_test.go deleted file mode 100644 index 510c27a9..00000000 --- a/vendor/github.com/pkg/errors/stack_test.go +++ /dev/null @@ -1,292 +0,0 @@ -package errors - -import ( - "fmt" - "runtime" - "testing" -) - -var initpc, _, _, _ = runtime.Caller(0) - -func TestFrameLine(t *testing.T) { - var tests = []struct { - Frame - want int - }{{ - Frame(initpc), - 9, - }, { - func() Frame { - var pc, _, _, _ = runtime.Caller(0) - return Frame(pc) - }(), - 20, - }, { - func() Frame { - var pc, _, _, _ = runtime.Caller(1) - return Frame(pc) - }(), - 28, - }, { - Frame(0), // invalid PC - 0, - }} - - for _, tt := range tests { - got := tt.Frame.line() - want := tt.want - if want != got { - t.Errorf("Frame(%v): want: %v, got: %v", uintptr(tt.Frame), want, got) - } - } -} - -type X struct{} - -func (x X) val() Frame { - var pc, _, _, _ = runtime.Caller(0) - return Frame(pc) -} - -func (x *X) ptr() Frame { - var pc, _, _, _ = runtime.Caller(0) - return Frame(pc) -} - -func TestFrameFormat(t *testing.T) { - var tests = []struct { - Frame - format string - want string - }{{ - Frame(initpc), - "%s", - "stack_test.go", - }, { - Frame(initpc), - "%+s", - "github.com/pkg/errors.init\n" + - "\t.+/github.com/pkg/errors/stack_test.go", - }, { - Frame(0), - "%s", - "unknown", - }, { - Frame(0), - "%+s", - "unknown", - }, { - Frame(initpc), - "%d", - "9", - }, { - Frame(0), - "%d", - "0", - }, { - Frame(initpc), - "%n", - "init", - }, { - func() Frame { - var x X - return x.ptr() - }(), - "%n", - `\(\*X\).ptr`, - }, { - func() Frame { - var x X - return x.val() - }(), - "%n", - "X.val", - }, { - Frame(0), - "%n", - "", - }, { - Frame(initpc), - "%v", - "stack_test.go:9", - }, { - Frame(initpc), - "%+v", - "github.com/pkg/errors.init\n" + - "\t.+/github.com/pkg/errors/stack_test.go:9", - }, { - Frame(0), - "%v", - "unknown:0", - }} - - for i, tt := range tests { - testFormatRegexp(t, i, tt.Frame, tt.format, tt.want) - } -} - -func TestFuncname(t *testing.T) { - tests := []struct { - name, want string - }{ - {"", ""}, - {"runtime.main", "main"}, - {"github.com/pkg/errors.funcname", "funcname"}, - {"funcname", "funcname"}, - {"io.copyBuffer", "copyBuffer"}, - {"main.(*R).Write", "(*R).Write"}, - } - - for _, tt := range tests { - got := funcname(tt.name) - want := tt.want - if got != want { - t.Errorf("funcname(%q): want: %q, got %q", tt.name, want, got) - } - } -} - -func TestTrimGOPATH(t *testing.T) { - var tests = []struct { - Frame - want string - }{{ - Frame(initpc), - "github.com/pkg/errors/stack_test.go", - }} - - for i, tt := range tests { - pc := tt.Frame.pc() - fn := runtime.FuncForPC(pc) - file, _ := fn.FileLine(pc) - got := trimGOPATH(fn.Name(), file) - testFormatRegexp(t, i, got, "%s", tt.want) - } -} - -func TestStackTrace(t *testing.T) { - tests := []struct { - err error - want []string - }{{ - New("ooh"), []string{ - "github.com/pkg/errors.TestStackTrace\n" + - "\t.+/github.com/pkg/errors/stack_test.go:172", - }, - }, { - Wrap(New("ooh"), "ahh"), []string{ - "github.com/pkg/errors.TestStackTrace\n" + - "\t.+/github.com/pkg/errors/stack_test.go:177", // this is the stack of Wrap, not New - }, - }, { - Cause(Wrap(New("ooh"), "ahh")), []string{ - "github.com/pkg/errors.TestStackTrace\n" + - "\t.+/github.com/pkg/errors/stack_test.go:182", // this is the stack of New 
- }, - }, { - func() error { return New("ooh") }(), []string{ - `github.com/pkg/errors.(func·009|TestStackTrace.func1)` + - "\n\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New - "github.com/pkg/errors.TestStackTrace\n" + - "\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New's caller - }, - }, { - Cause(func() error { - return func() error { - return Errorf("hello %s", fmt.Sprintf("world")) - }() - }()), []string{ - `github.com/pkg/errors.(func·010|TestStackTrace.func2.1)` + - "\n\t.+/github.com/pkg/errors/stack_test.go:196", // this is the stack of Errorf - `github.com/pkg/errors.(func·011|TestStackTrace.func2)` + - "\n\t.+/github.com/pkg/errors/stack_test.go:197", // this is the stack of Errorf's caller - "github.com/pkg/errors.TestStackTrace\n" + - "\t.+/github.com/pkg/errors/stack_test.go:198", // this is the stack of Errorf's caller's caller - }, - }} - for i, tt := range tests { - x, ok := tt.err.(interface { - StackTrace() StackTrace - }) - if !ok { - t.Errorf("expected %#v to implement StackTrace() StackTrace", tt.err) - continue - } - st := x.StackTrace() - for j, want := range tt.want { - testFormatRegexp(t, i, st[j], "%+v", want) - } - } -} - -func stackTrace() StackTrace { - const depth = 8 - var pcs [depth]uintptr - n := runtime.Callers(1, pcs[:]) - var st stack = pcs[0:n] - return st.StackTrace() -} - -func TestStackTraceFormat(t *testing.T) { - tests := []struct { - StackTrace - format string - want string - }{{ - nil, - "%s", - `\[\]`, - }, { - nil, - "%v", - `\[\]`, - }, { - nil, - "%+v", - "", - }, { - nil, - "%#v", - `\[\]errors.Frame\(nil\)`, - }, { - make(StackTrace, 0), - "%s", - `\[\]`, - }, { - make(StackTrace, 0), - "%v", - `\[\]`, - }, { - make(StackTrace, 0), - "%+v", - "", - }, { - make(StackTrace, 0), - "%#v", - `\[\]errors.Frame{}`, - }, { - stackTrace()[:2], - "%s", - `\[stack_test.go stack_test.go\]`, - }, { - stackTrace()[:2], - "%v", - `\[stack_test.go:225 stack_test.go:272\]`, - }, { - stackTrace()[:2], - "%+v", - "\n" + - "github.com/pkg/errors.stackTrace\n" + - "\t.+/github.com/pkg/errors/stack_test.go:225\n" + - "github.com/pkg/errors.TestStackTraceFormat\n" + - "\t.+/github.com/pkg/errors/stack_test.go:276", - }, { - stackTrace()[:2], - "%#v", - `\[\]errors.Frame{stack_test.go:225, stack_test.go:284}`, - }} - - for i, tt := range tests { - testFormatRegexp(t, i, tt.StackTrace, tt.format, tt.want) - } -} diff --git a/vendor/github.com/pkg/profile/.travis.yml b/vendor/github.com/pkg/profile/.travis.yml deleted file mode 100644 index 192c5c27..00000000 --- a/vendor/github.com/pkg/profile/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go -go_import_path: github.com/pkg/profile -go: - - 1.4.3 - - 1.5.2 - - 1.6.3 - - tip - -script: - - go test github.com/pkg/profile - - go test -race github.com/pkg/profile diff --git a/vendor/github.com/pkg/profile/BUILD.bazel b/vendor/github.com/pkg/profile/BUILD.bazel deleted file mode 100644 index 848ce2f9..00000000 --- a/vendor/github.com/pkg/profile/BUILD.bazel +++ /dev/null @@ -1,31 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "mutex.go", - "mutex17.go", - "profile.go", - "trace.go", - "trace16.go", - ], - importpath = "github.com/pkg/profile", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = ["profile_test.go"], - embed = [":go_default_library"], - importpath = "github.com/pkg/profile", -) - -go_test( - name = "go_default_xtest", - 
srcs = [ - "example_test.go", - "trace_test.go", - ], - importpath = "github.com/pkg/profile_test", - deps = [":go_default_library"], -) diff --git a/vendor/github.com/pkg/profile/README.md b/vendor/github.com/pkg/profile/README.md deleted file mode 100644 index 37bfa58c..00000000 --- a/vendor/github.com/pkg/profile/README.md +++ /dev/null @@ -1,54 +0,0 @@ -profile -======= - -Simple profiling support package for Go - -[![Build Status](https://travis-ci.org/pkg/profile.svg?branch=master)](https://travis-ci.org/pkg/profile) [![GoDoc](http://godoc.org/github.com/pkg/profile?status.svg)](http://godoc.org/github.com/pkg/profile) - - -installation ------------- - - go get github.com/pkg/profile - -usage ------ - -Enabling profiling in your application is as simple as one line at the top of your main function - -```go -import "github.com/pkg/profile" - -func main() { - defer profile.Start().Stop() - ... -} -``` - -options -------- - -What to profile is controlled by config value passed to profile.Start. -By default CPU profiling is enabled. - -```go -import "github.com/pkg/profile" - -func main() { - // p.Stop() must be called before the program exits to - // ensure profiling information is written to disk. - p := profile.Start(profile.MemProfile, profile.ProfilePath("."), profile.NoShutdownHook) - ... -} -``` - -Several convenience package level values are provided for cpu, memory, and block (contention) profiling. - -For more complex options, consult the [documentation](http://godoc.org/github.com/pkg/profile). - -contributing ------------- - -We welcome pull requests, bug fixes and issue reports. - -Before proposing a change, please discuss it first by raising an issue. diff --git a/vendor/github.com/pkg/profile/example_test.go b/vendor/github.com/pkg/profile/example_test.go deleted file mode 100644 index 98a54b5e..00000000 --- a/vendor/github.com/pkg/profile/example_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package profile_test - -import ( - "flag" - "os" - - "github.com/pkg/profile" -) - -func ExampleStart() { - // start a simple CPU profile and register - // a defer to Stop (flush) the profiling data. - defer profile.Start().Stop() -} - -func ExampleCPUProfile() { - // CPU profiling is the default profiling mode, but you can specify it - // explicitly for completeness. - defer profile.Start(profile.CPUProfile).Stop() -} - -func ExampleMemProfile() { - // use memory profiling, rather than the default cpu profiling. - defer profile.Start(profile.MemProfile).Stop() -} - -func ExampleMemProfileRate() { - // use memory profiling with custom rate. - defer profile.Start(profile.MemProfileRate(2048)).Stop() -} - -func ExampleProfilePath() { - // set the location that the profile will be written to - defer profile.Start(profile.ProfilePath(os.Getenv("HOME"))).Stop() -} - -func ExampleNoShutdownHook() { - // disable the automatic shutdown hook. - defer profile.Start(profile.NoShutdownHook).Stop() -} - -func ExampleStart_withFlags() { - // use the flags package to selectively enable profiling. 
- mode := flag.String("profile.mode", "", "enable profiling mode, one of [cpu, mem, mutex, block]") - flag.Parse() - switch *mode { - case "cpu": - defer profile.Start(profile.CPUProfile).Stop() - case "mem": - defer profile.Start(profile.MemProfile).Stop() - case "mutex": - defer profile.Start(profile.MutexProfile).Stop() - case "block": - defer profile.Start(profile.BlockProfile).Stop() - default: - // do nothing - } -} diff --git a/vendor/github.com/pkg/profile/profile_test.go b/vendor/github.com/pkg/profile/profile_test.go deleted file mode 100644 index e33012ce..00000000 --- a/vendor/github.com/pkg/profile/profile_test.go +++ /dev/null @@ -1,330 +0,0 @@ -package profile - -import ( - "bufio" - "bytes" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strings" - "testing" -) - -type checkFn func(t *testing.T, stdout, stderr []byte, err error) - -func TestProfile(t *testing.T) { - f, err := ioutil.TempFile("", "profile_test") - if err != nil { - t.Fatal(err) - } - defer os.Remove(f.Name()) - - var profileTests = []struct { - name string - code string - checks []checkFn - }{{ - name: "default profile (cpu)", - code: ` -package main - -import "github.com/pkg/profile" - -func main() { - defer profile.Start().Stop() -} -`, - checks: []checkFn{ - NoStdout, - Stderr("profile: cpu profiling enabled"), - NoErr, - }, - }, { - name: "memory profile", - code: ` -package main - -import "github.com/pkg/profile" - -func main() { - defer profile.Start(profile.MemProfile).Stop() -} -`, - checks: []checkFn{ - NoStdout, - Stderr("profile: memory profiling enabled"), - NoErr, - }, - }, { - name: "memory profile (rate 2048)", - code: ` -package main - -import "github.com/pkg/profile" - -func main() { - defer profile.Start(profile.MemProfileRate(2048)).Stop() -} -`, - checks: []checkFn{ - NoStdout, - Stderr("profile: memory profiling enabled (rate 2048)"), - NoErr, - }, - }, { - name: "double start", - code: ` -package main - -import "github.com/pkg/profile" - -func main() { - profile.Start() - profile.Start() -} -`, - checks: []checkFn{ - NoStdout, - Stderr("cpu profiling enabled", "profile: Start() already called"), - Err, - }, - }, { - name: "block profile", - code: ` -package main - -import "github.com/pkg/profile" - -func main() { - defer profile.Start(profile.BlockProfile).Stop() -} -`, - checks: []checkFn{ - NoStdout, - Stderr("profile: block profiling enabled"), - NoErr, - }, - }, { - name: "mutex profile", - code: ` -package main - -import "github.com/pkg/profile" - -func main() { - defer profile.Start(profile.MutexProfile).Stop() -} -`, - checks: []checkFn{ - NoStdout, - Stderr("profile: mutex profiling enabled"), - NoErr, - }, - }, { - name: "profile path", - code: ` -package main - -import "github.com/pkg/profile" - -func main() { - defer profile.Start(profile.ProfilePath(".")).Stop() -} -`, - checks: []checkFn{ - NoStdout, - Stderr("profile: cpu profiling enabled, cpu.pprof"), - NoErr, - }, - }, { - name: "profile path error", - code: ` -package main - -import "github.com/pkg/profile" - -func main() { - defer profile.Start(profile.ProfilePath("` + f.Name() + `")).Stop() -} -`, - checks: []checkFn{ - NoStdout, - Stderr("could not create initial output"), - Err, - }, - }, { - name: "multiple profile sessions", - code: ` -package main - -import "github.com/pkg/profile" - -func main() { - profile.Start(profile.CPUProfile).Stop() - profile.Start(profile.MemProfile).Stop() - profile.Start(profile.BlockProfile).Stop() - profile.Start(profile.CPUProfile).Stop() - 
profile.Start(profile.MutexProfile).Stop() -} -`, - checks: []checkFn{ - NoStdout, - Stderr("profile: cpu profiling enabled", - "profile: cpu profiling disabled", - "profile: memory profiling enabled", - "profile: memory profiling disabled", - "profile: block profiling enabled", - "profile: block profiling disabled", - "profile: cpu profiling enabled", - "profile: cpu profiling disabled", - "profile: mutex profiling enabled", - "profile: mutex profiling disabled"), - NoErr, - }, - }, { - name: "profile quiet", - code: ` -package main - -import "github.com/pkg/profile" - -func main() { - defer profile.Start(profile.Quiet).Stop() -} -`, - checks: []checkFn{NoStdout, NoStderr, NoErr}, - }} - for _, tt := range profileTests { - t.Log(tt.name) - stdout, stderr, err := runTest(t, tt.code) - for _, f := range tt.checks { - f(t, stdout, stderr, err) - } - } -} - -// NoStdout checks that stdout was blank. -func NoStdout(t *testing.T, stdout, _ []byte, _ error) { - if len := len(stdout); len > 0 { - t.Errorf("stdout: wanted 0 bytes, got %d", len) - } -} - -// Stderr verifies that the given lines match the output from stderr -func Stderr(lines ...string) checkFn { - return func(t *testing.T, _, stderr []byte, _ error) { - r := bytes.NewReader(stderr) - if !validateOutput(r, lines) { - t.Errorf("stderr: wanted '%s', got '%s'", lines, stderr) - } - } -} - -// NoStderr checks that stderr was blank. -func NoStderr(t *testing.T, _, stderr []byte, _ error) { - if len := len(stderr); len > 0 { - t.Errorf("stderr: wanted 0 bytes, got %d", len) - } -} - -// Err checks that there was an error returned -func Err(t *testing.T, _, _ []byte, err error) { - if err == nil { - t.Errorf("expected error") - } -} - -// NoErr checks that err was nil -func NoErr(t *testing.T, _, _ []byte, err error) { - if err != nil { - t.Errorf("error: expected nil, got %v", err) - } -} - -// validatedOutput validates the given slice of lines against data from the given reader. -func validateOutput(r io.Reader, want []string) bool { - s := bufio.NewScanner(r) - for _, line := range want { - if !s.Scan() || !strings.Contains(s.Text(), line) { - return false - } - } - return true -} - -var validateOutputTests = []struct { - input string - lines []string - want bool -}{{ - input: "", - want: true, -}, { - input: `profile: yes -`, - want: true, -}, { - input: `profile: yes -`, - lines: []string{"profile: yes"}, - want: true, -}, { - input: `profile: yes -profile: no -`, - lines: []string{"profile: yes"}, - want: true, -}, { - input: `profile: yes -profile: no -`, - lines: []string{"profile: yes", "profile: no"}, - want: true, -}, { - input: `profile: yes -profile: no -`, - lines: []string{"profile: no"}, - want: false, -}} - -func TestValidateOutput(t *testing.T) { - for _, tt := range validateOutputTests { - r := strings.NewReader(tt.input) - got := validateOutput(r, tt.lines) - if tt.want != got { - t.Errorf("validateOutput(%q, %q), want %v, got %v", tt.input, tt.lines, tt.want, got) - } - } -} - -// runTest executes the go program supplied and returns the contents of stdout, -// stderr, and an error which may contain status information about the result -// of the program. 
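Similarly, a minimal sketch of the profile.Start API that the removed pkg/profile README and tests describe, assuming the documented MemProfile, ProfilePath and NoShutdownHook options; work is a stand-in for the code under measurement:

```go
package main

import "github.com/pkg/profile"

func main() {
	// Write a memory profile to the current directory and skip the
	// automatic interrupt hook; Stop flushes the profile before exit.
	defer profile.Start(profile.MemProfile, profile.ProfilePath("."), profile.NoShutdownHook).Stop()

	work()
}

// work is a stand-in for whatever the program is actually measuring.
func work() {
	buf := make([]byte, 0)
	for i := 0; i < 1000000; i++ {
		buf = append(buf, byte(i))
	}
	_ = buf
}
```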
-func runTest(t *testing.T, code string) ([]byte, []byte, error) { - chk := func(err error) { - if err != nil { - t.Fatal(err) - } - } - gopath, err := ioutil.TempDir("", "profile-gopath") - chk(err) - defer os.RemoveAll(gopath) - - srcdir := filepath.Join(gopath, "src") - err = os.Mkdir(srcdir, 0755) - chk(err) - src := filepath.Join(srcdir, "main.go") - err = ioutil.WriteFile(src, []byte(code), 0644) - chk(err) - - cmd := exec.Command("go", "run", src) - - var stdout, stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - err = cmd.Run() - return stdout.Bytes(), stderr.Bytes(), err -} diff --git a/vendor/github.com/pkg/profile/trace_test.go b/vendor/github.com/pkg/profile/trace_test.go deleted file mode 100644 index 6a61d793..00000000 --- a/vendor/github.com/pkg/profile/trace_test.go +++ /dev/null @@ -1,8 +0,0 @@ -package profile_test - -import "github.com/pkg/profile" - -func ExampleTraceProfile() { - // use execution tracing, rather than the default cpu profiling. - defer profile.Start(profile.TraceProfile).Stop() -} diff --git a/vendor/github.com/pmezard/go-difflib/.travis.yml b/vendor/github.com/pmezard/go-difflib/.travis.yml deleted file mode 100644 index 90c9c6f9..00000000 --- a/vendor/github.com/pmezard/go-difflib/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go -go: - - 1.5 - - tip - diff --git a/vendor/github.com/pmezard/go-difflib/README.md b/vendor/github.com/pmezard/go-difflib/README.md deleted file mode 100644 index e87f307e..00000000 --- a/vendor/github.com/pmezard/go-difflib/README.md +++ /dev/null @@ -1,50 +0,0 @@ -go-difflib -========== - -[![Build Status](https://travis-ci.org/pmezard/go-difflib.png?branch=master)](https://travis-ci.org/pmezard/go-difflib) -[![GoDoc](https://godoc.org/github.com/pmezard/go-difflib/difflib?status.svg)](https://godoc.org/github.com/pmezard/go-difflib/difflib) - -Go-difflib is a partial port of python 3 difflib package. Its main goal -was to make unified and context diff available in pure Go, mostly for -testing purposes. - -The following class and functions (and related tests) have be ported: - -* `SequenceMatcher` -* `unified_diff()` -* `context_diff()` - -## Installation - -```bash -$ go get github.com/pmezard/go-difflib/difflib -``` - -### Quick Start - -Diffs are configured with Unified (or ContextDiff) structures, and can -be output to an io.Writer or returned as a string. 
- -```Go -diff := UnifiedDiff{ - A: difflib.SplitLines("foo\nbar\n"), - B: difflib.SplitLines("foo\nbaz\n"), - FromFile: "Original", - ToFile: "Current", - Context: 3, -} -text, _ := GetUnifiedDiffString(diff) -fmt.Printf(text) -``` - -would output: - -``` ---- Original -+++ Current -@@ -1,3 +1,3 @@ - foo --bar -+baz -``` - diff --git a/vendor/github.com/pmezard/go-difflib/difflib/BUILD.bazel b/vendor/github.com/pmezard/go-difflib/difflib/BUILD.bazel deleted file mode 100644 index c44638cb..00000000 --- a/vendor/github.com/pmezard/go-difflib/difflib/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = ["difflib.go"], - importpath = "github.com/pmezard/go-difflib/difflib", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = ["difflib_test.go"], - embed = [":go_default_library"], - importpath = "github.com/pmezard/go-difflib/difflib", -) diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go deleted file mode 100644 index d7251196..00000000 --- a/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go +++ /dev/null @@ -1,426 +0,0 @@ -package difflib - -import ( - "bytes" - "fmt" - "math" - "reflect" - "strings" - "testing" -) - -func assertAlmostEqual(t *testing.T, a, b float64, places int) { - if math.Abs(a-b) > math.Pow10(-places) { - t.Errorf("%.7f != %.7f", a, b) - } -} - -func assertEqual(t *testing.T, a, b interface{}) { - if !reflect.DeepEqual(a, b) { - t.Errorf("%v != %v", a, b) - } -} - -func splitChars(s string) []string { - chars := make([]string, 0, len(s)) - // Assume ASCII inputs - for i := 0; i != len(s); i++ { - chars = append(chars, string(s[i])) - } - return chars -} - -func TestSequenceMatcherRatio(t *testing.T) { - s := NewMatcher(splitChars("abcd"), splitChars("bcde")) - assertEqual(t, s.Ratio(), 0.75) - assertEqual(t, s.QuickRatio(), 0.75) - assertEqual(t, s.RealQuickRatio(), 1.0) -} - -func TestGetOptCodes(t *testing.T) { - a := "qabxcd" - b := "abycdf" - s := NewMatcher(splitChars(a), splitChars(b)) - w := &bytes.Buffer{} - for _, op := range s.GetOpCodes() { - fmt.Fprintf(w, "%s a[%d:%d], (%s) b[%d:%d] (%s)\n", string(op.Tag), - op.I1, op.I2, a[op.I1:op.I2], op.J1, op.J2, b[op.J1:op.J2]) - } - result := string(w.Bytes()) - expected := `d a[0:1], (q) b[0:0] () -e a[1:3], (ab) b[0:2] (ab) -r a[3:4], (x) b[2:3] (y) -e a[4:6], (cd) b[3:5] (cd) -i a[6:6], () b[5:6] (f) -` - if expected != result { - t.Errorf("unexpected op codes: \n%s", result) - } -} - -func TestGroupedOpCodes(t *testing.T) { - a := []string{} - for i := 0; i != 39; i++ { - a = append(a, fmt.Sprintf("%02d", i)) - } - b := []string{} - b = append(b, a[:8]...) - b = append(b, " i") - b = append(b, a[8:19]...) - b = append(b, " x") - b = append(b, a[20:22]...) - b = append(b, a[27:34]...) - b = append(b, " y") - b = append(b, a[35:]...) 
- s := NewMatcher(a, b) - w := &bytes.Buffer{} - for _, g := range s.GetGroupedOpCodes(-1) { - fmt.Fprintf(w, "group\n") - for _, op := range g { - fmt.Fprintf(w, " %s, %d, %d, %d, %d\n", string(op.Tag), - op.I1, op.I2, op.J1, op.J2) - } - } - result := string(w.Bytes()) - expected := `group - e, 5, 8, 5, 8 - i, 8, 8, 8, 9 - e, 8, 11, 9, 12 -group - e, 16, 19, 17, 20 - r, 19, 20, 20, 21 - e, 20, 22, 21, 23 - d, 22, 27, 23, 23 - e, 27, 30, 23, 26 -group - e, 31, 34, 27, 30 - r, 34, 35, 30, 31 - e, 35, 38, 31, 34 -` - if expected != result { - t.Errorf("unexpected op codes: \n%s", result) - } -} - -func ExampleGetUnifiedDiffCode() { - a := `one -two -three -four -fmt.Printf("%s,%T",a,b)` - b := `zero -one -three -four` - diff := UnifiedDiff{ - A: SplitLines(a), - B: SplitLines(b), - FromFile: "Original", - FromDate: "2005-01-26 23:30:50", - ToFile: "Current", - ToDate: "2010-04-02 10:20:52", - Context: 3, - } - result, _ := GetUnifiedDiffString(diff) - fmt.Println(strings.Replace(result, "\t", " ", -1)) - // Output: - // --- Original 2005-01-26 23:30:50 - // +++ Current 2010-04-02 10:20:52 - // @@ -1,5 +1,4 @@ - // +zero - // one - // -two - // three - // four - // -fmt.Printf("%s,%T",a,b) -} - -func ExampleGetContextDiffCode() { - a := `one -two -three -four -fmt.Printf("%s,%T",a,b)` - b := `zero -one -tree -four` - diff := ContextDiff{ - A: SplitLines(a), - B: SplitLines(b), - FromFile: "Original", - ToFile: "Current", - Context: 3, - Eol: "\n", - } - result, _ := GetContextDiffString(diff) - fmt.Print(strings.Replace(result, "\t", " ", -1)) - // Output: - // *** Original - // --- Current - // *************** - // *** 1,5 **** - // one - // ! two - // ! three - // four - // - fmt.Printf("%s,%T",a,b) - // --- 1,4 ---- - // + zero - // one - // ! tree - // four -} - -func ExampleGetContextDiffString() { - a := `one -two -three -four` - b := `zero -one -tree -four` - diff := ContextDiff{ - A: SplitLines(a), - B: SplitLines(b), - FromFile: "Original", - ToFile: "Current", - Context: 3, - Eol: "\n", - } - result, _ := GetContextDiffString(diff) - fmt.Printf(strings.Replace(result, "\t", " ", -1)) - // Output: - // *** Original - // --- Current - // *************** - // *** 1,4 **** - // one - // ! two - // ! three - // four - // --- 1,4 ---- - // + zero - // one - // ! 
tree - // four -} - -func rep(s string, count int) string { - return strings.Repeat(s, count) -} - -func TestWithAsciiOneInsert(t *testing.T) { - sm := NewMatcher(splitChars(rep("b", 100)), - splitChars("a"+rep("b", 100))) - assertAlmostEqual(t, sm.Ratio(), 0.995, 3) - assertEqual(t, sm.GetOpCodes(), - []OpCode{{'i', 0, 0, 0, 1}, {'e', 0, 100, 1, 101}}) - assertEqual(t, len(sm.bPopular), 0) - - sm = NewMatcher(splitChars(rep("b", 100)), - splitChars(rep("b", 50)+"a"+rep("b", 50))) - assertAlmostEqual(t, sm.Ratio(), 0.995, 3) - assertEqual(t, sm.GetOpCodes(), - []OpCode{{'e', 0, 50, 0, 50}, {'i', 50, 50, 50, 51}, {'e', 50, 100, 51, 101}}) - assertEqual(t, len(sm.bPopular), 0) -} - -func TestWithAsciiOnDelete(t *testing.T) { - sm := NewMatcher(splitChars(rep("a", 40)+"c"+rep("b", 40)), - splitChars(rep("a", 40)+rep("b", 40))) - assertAlmostEqual(t, sm.Ratio(), 0.994, 3) - assertEqual(t, sm.GetOpCodes(), - []OpCode{{'e', 0, 40, 0, 40}, {'d', 40, 41, 40, 40}, {'e', 41, 81, 40, 80}}) -} - -func TestWithAsciiBJunk(t *testing.T) { - isJunk := func(s string) bool { - return s == " " - } - sm := NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)), - splitChars(rep("a", 44)+rep("b", 40)), true, isJunk) - assertEqual(t, sm.bJunk, map[string]struct{}{}) - - sm = NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)), - splitChars(rep("a", 44)+rep("b", 40)+rep(" ", 20)), false, isJunk) - assertEqual(t, sm.bJunk, map[string]struct{}{" ": struct{}{}}) - - isJunk = func(s string) bool { - return s == " " || s == "b" - } - sm = NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)), - splitChars(rep("a", 44)+rep("b", 40)+rep(" ", 20)), false, isJunk) - assertEqual(t, sm.bJunk, map[string]struct{}{" ": struct{}{}, "b": struct{}{}}) -} - -func TestSFBugsRatioForNullSeqn(t *testing.T) { - sm := NewMatcher(nil, nil) - assertEqual(t, sm.Ratio(), 1.0) - assertEqual(t, sm.QuickRatio(), 1.0) - assertEqual(t, sm.RealQuickRatio(), 1.0) -} - -func TestSFBugsComparingEmptyLists(t *testing.T) { - groups := NewMatcher(nil, nil).GetGroupedOpCodes(-1) - assertEqual(t, len(groups), 0) - diff := UnifiedDiff{ - FromFile: "Original", - ToFile: "Current", - Context: 3, - } - result, err := GetUnifiedDiffString(diff) - assertEqual(t, err, nil) - assertEqual(t, result, "") -} - -func TestOutputFormatRangeFormatUnified(t *testing.T) { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - // - // Each field shall be of the form: - // %1d", if the range contains exactly one line, - // and: - // "%1d,%1d", , otherwise. - // If a range is empty, its beginning line number shall be the number of - // the line just before the range, or 0 if the empty range starts the file. - fm := formatRangeUnified - assertEqual(t, fm(3, 3), "3,0") - assertEqual(t, fm(3, 4), "4") - assertEqual(t, fm(3, 5), "4,2") - assertEqual(t, fm(3, 6), "4,3") - assertEqual(t, fm(0, 0), "0,0") -} - -func TestOutputFormatRangeFormatContext(t *testing.T) { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - // - // The range of lines in file1 shall be written in the following format - // if the range contains two or more lines: - // "*** %d,%d ****\n", , - // and the following format otherwise: - // "*** %d ****\n", - // The ending line number of an empty range shall be the number of the preceding line, - // or 0 if the range is at the start of the file. 
- // - // Next, the range of lines in file2 shall be written in the following format - // if the range contains two or more lines: - // "--- %d,%d ----\n", , - // and the following format otherwise: - // "--- %d ----\n", - fm := formatRangeContext - assertEqual(t, fm(3, 3), "3") - assertEqual(t, fm(3, 4), "4") - assertEqual(t, fm(3, 5), "4,5") - assertEqual(t, fm(3, 6), "4,6") - assertEqual(t, fm(0, 0), "0") -} - -func TestOutputFormatTabDelimiter(t *testing.T) { - diff := UnifiedDiff{ - A: splitChars("one"), - B: splitChars("two"), - FromFile: "Original", - FromDate: "2005-01-26 23:30:50", - ToFile: "Current", - ToDate: "2010-04-12 10:20:52", - Eol: "\n", - } - ud, err := GetUnifiedDiffString(diff) - assertEqual(t, err, nil) - assertEqual(t, SplitLines(ud)[:2], []string{ - "--- Original\t2005-01-26 23:30:50\n", - "+++ Current\t2010-04-12 10:20:52\n", - }) - cd, err := GetContextDiffString(ContextDiff(diff)) - assertEqual(t, err, nil) - assertEqual(t, SplitLines(cd)[:2], []string{ - "*** Original\t2005-01-26 23:30:50\n", - "--- Current\t2010-04-12 10:20:52\n", - }) -} - -func TestOutputFormatNoTrailingTabOnEmptyFiledate(t *testing.T) { - diff := UnifiedDiff{ - A: splitChars("one"), - B: splitChars("two"), - FromFile: "Original", - ToFile: "Current", - Eol: "\n", - } - ud, err := GetUnifiedDiffString(diff) - assertEqual(t, err, nil) - assertEqual(t, SplitLines(ud)[:2], []string{"--- Original\n", "+++ Current\n"}) - - cd, err := GetContextDiffString(ContextDiff(diff)) - assertEqual(t, err, nil) - assertEqual(t, SplitLines(cd)[:2], []string{"*** Original\n", "--- Current\n"}) -} - -func TestOmitFilenames(t *testing.T) { - diff := UnifiedDiff{ - A: SplitLines("o\nn\ne\n"), - B: SplitLines("t\nw\no\n"), - Eol: "\n", - } - ud, err := GetUnifiedDiffString(diff) - assertEqual(t, err, nil) - assertEqual(t, SplitLines(ud), []string{ - "@@ -0,0 +1,2 @@\n", - "+t\n", - "+w\n", - "@@ -2,2 +3,0 @@\n", - "-n\n", - "-e\n", - "\n", - }) - - cd, err := GetContextDiffString(ContextDiff(diff)) - assertEqual(t, err, nil) - assertEqual(t, SplitLines(cd), []string{ - "***************\n", - "*** 0 ****\n", - "--- 1,2 ----\n", - "+ t\n", - "+ w\n", - "***************\n", - "*** 2,3 ****\n", - "- n\n", - "- e\n", - "--- 3 ----\n", - "\n", - }) -} - -func TestSplitLines(t *testing.T) { - allTests := []struct { - input string - want []string - }{ - {"foo", []string{"foo\n"}}, - {"foo\nbar", []string{"foo\n", "bar\n"}}, - {"foo\nbar\n", []string{"foo\n", "bar\n", "\n"}}, - } - for _, test := range allTests { - assertEqual(t, SplitLines(test.input), test.want) - } -} - -func benchmarkSplitLines(b *testing.B, count int) { - str := strings.Repeat("foo\n", count) - - b.ResetTimer() - - n := 0 - for i := 0; i < b.N; i++ { - n += len(SplitLines(str)) - } -} - -func BenchmarkSplitLines100(b *testing.B) { - benchmarkSplitLines(b, 100) -} - -func BenchmarkSplitLines10000(b *testing.B) { - benchmarkSplitLines(b, 10000) -} diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore deleted file mode 100644 index 66be63a0..00000000 --- a/vendor/github.com/sirupsen/logrus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -logrus diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml deleted file mode 100644 index a23296a5..00000000 --- a/vendor/github.com/sirupsen/logrus/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -go: - - 1.6.x - - 1.7.x - - 1.8.x - - tip -env: - - GOMAXPROCS=4 GORACE=halt_on_error=1 -install: - - go get 
github.com/stretchr/testify/assert - - go get gopkg.in/gemnasium/logrus-airbrake-hook.v2 - - go get golang.org/x/sys/unix - - go get golang.org/x/sys/windows -script: - - go test -race -v ./... diff --git a/vendor/github.com/sirupsen/logrus/BUILD.bazel b/vendor/github.com/sirupsen/logrus/BUILD.bazel deleted file mode 100644 index ed862c32..00000000 --- a/vendor/github.com/sirupsen/logrus/BUILD.bazel +++ /dev/null @@ -1,94 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "alt_exit.go", - "doc.go", - "entry.go", - "exported.go", - "formatter.go", - "hooks.go", - "json_formatter.go", - "logger.go", - "logrus.go", - "terminal_check_notappengine.go", - "text_formatter.go", - "writer.go", - ] + select({ - "@io_bazel_rules_go//go/platform:darwin": [ - "terminal_bsd.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "terminal_bsd.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "terminal_bsd.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "terminal_linux.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "terminal_bsd.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "terminal_bsd.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/sirupsen/logrus", - visibility = ["//visibility:public"], - deps = [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:darwin": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "//conditions:default": [], - }), -) - -go_test( - name = "go_default_test", - srcs = [ - "alt_exit_test.go", - "entry_test.go", - "formatter_bench_test.go", - "hook_test.go", - "json_formatter_test.go", - "logger_bench_test.go", - "logrus_test.go", - "text_formatter_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/sirupsen/logrus", - deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"], -) - -go_test( - name = "go_default_xtest", - srcs = [ - "example_basic_test.go", - "example_hook_test.go", - ], - importpath = "github.com/sirupsen/logrus_test", - deps = [ - ":go_default_library", - "//vendor/gopkg.in/gemnasium/logrus-airbrake-hook.v2:go_default_library", - ], -) diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index 1bd1deb2..00000000 --- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,123 +0,0 @@ -# 1.0.5 - -* Fix hooks race (#707) -* Fix panic deadlock (#695) - -# 1.0.4 - -* Fix race when adding hooks (#612) -* Fix terminal check in AppEngine (#635) - -# 1.0.3 - -* Replace example files with testable examples - -# 1.0.2 - -* bug: quote non-string values in text formatter (#583) -* Make (*Logger) SetLevel a public method - -# 1.0.1 - -* bug: fix escaping in text formatter (#575) - -# 1.0.0 - -* Officially changed name to lower-case -* bug: colors on Windows 10 (#541) -* bug: fix race in 
accessing level (#512) - -# 0.11.5 - -* feature: add writer and writerlevel to entry (#372) - -# 0.11.4 - -* bug: fix undefined variable on solaris (#493) - -# 0.11.3 - -* formatter: configure quoting of empty values (#484) -* formatter: configure quoting character (default is `"`) (#484) -* bug: fix not importing io correctly in non-linux environments (#481) - -# 0.11.2 - -* bug: fix windows terminal detection (#476) - -# 0.11.1 - -* bug: fix tty detection with custom out (#471) - -# 0.11.0 - -* performance: Use bufferpool to allocate (#370) -* terminal: terminal detection for app-engine (#343) -* feature: exit handler (#375) - -# 0.10.0 - -* feature: Add a test hook (#180) -* feature: `ParseLevel` is now case-insensitive (#326) -* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) -* performance: avoid re-allocations on `WithFields` (#335) - -# 0.9.0 - -* logrus/text_formatter: don't emit empty msg -* logrus/hooks/airbrake: move out of main repository -* logrus/hooks/sentry: move out of main repository -* logrus/hooks/papertrail: move out of main repository -* logrus/hooks/bugsnag: move out of main repository -* logrus/core: run tests with `-race` -* logrus/core: detect TTY based on `stderr` -* logrus/core: support `WithError` on logger -* logrus/core: Solaris support - -# 0.8.7 - -* logrus/core: fix possible race (#216) -* logrus/doc: small typo fixes and doc improvements - - -# 0.8.6 - -* hooks/raven: allow passing an initialized client - -# 0.8.5 - -* logrus/core: revert #208 - -# 0.8.4 - -* formatter/text: fix data race (#218) - -# 0.8.3 - -* logrus/core: fix entry log level (#208) -* logrus/core: improve performance of text formatter by 40% -* logrus/core: expose `LevelHooks` type -* logrus/core: add support for DragonflyBSD and NetBSD -* formatter/text: print structs more verbosely - -# 0.8.2 - -* logrus: fix more Fatal family functions - -# 0.8.1 - -* logrus: fix not exiting on `Fatalf` and `Fatalln` - -# 0.8.0 - -* logrus: defaults to stderr instead of stdout -* hooks/sentry: add special field for `*http.Request` -* formatter/text: ignore Windows for colors - -# 0.7.3 - -* formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md deleted file mode 100644 index f77819b1..00000000 --- a/vendor/github.com/sirupsen/logrus/README.md +++ /dev/null @@ -1,511 +0,0 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. - -**Seeing weird case-sensitive problems?** It's in the past been possible to -import Logrus as both upper- and lower-case. Due to the Go package environment, -this caused issues in the community and we needed a standard. Some environments -experienced problems with the upper-case variant, so the lower-case was decided. -Everything using `logrus` will need to use the lower-case: -`github.com/sirupsen/logrus`. Any package that isn't, should be changed. - -To fix Glide, see [these -comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). 
-For an in-depth explanation of the casing issue, see [this -comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). - -**Are you interested in assisting in maintaining Logrus?** Currently I have a -lot of obligations, and I am unable to provide Logrus with the maintainership it -needs. If you'd like to help, please reach out to me at `simon at author's -username dot com`. - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 -``` - -#### Case-sensitivity - -The organization's name was changed to lower-case--and this will not be changed -back. If you are getting import conflicts due to case sensitivity, please use -the lower-case import: `github.com/sirupsen/logrus`. - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` -and you'll now have the flexibility of Logrus. You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/sirupsen/logrus" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Output to stdout instead of the default stderr - // Can be any io.Writer, see below for File example - log.SetOutput(os.Stdout) - - // Only log the warning severity or above. 
- log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "os" - "github.com/sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stdout - - // You could set this to any `io.Writer` such as a file - // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666) - // if err == nil { - // log.Out = file - // } else { - // log.Info("Failed to log to file, using default stderr") - // } - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging through logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus using any of the `printf`-family functions should be -seen as a hint you should add a field, however, you can still use the -`printf`-family functions with Logrus. - -#### Default Fields - -Often it's helpful to have fields _always_ attached to log statements in an -application or parts of one. For example, you may want to always log the -`request_id` and `user_ip` in the context of a request. Instead of writing -`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on -every line, you can create a `logrus.Entry` to pass around instead: - -```go -requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) -requestLogger.Info("something happened on that request") # will log request_id and user_ip -requestLogger.Warn("something not great happened") -``` - -#### Hooks - -You can add hooks for logging levels. For example to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to -multiple places simultaneously, e.g. syslog. - -Logrus comes with [built-in hooks](hooks/). 
Add those, or your custom hook, in -`init`: - -```go -import ( - log "github.com/sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" - logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook(123, "xyz", "production")) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` -Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). - -| Hook | Description | -| ----- | ----------- | -| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | -| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) | -| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | -| [Application Insights](https://github.com/jjcollinge/logrus-appinsights) | Hook for logging to [Application Insights](https://azure.microsoft.com/en-us/services/application-insights/) -| [AzureTableHook](https://github.com/kpfaulkner/azuretablehook/) | Hook for logging to Azure Table Storage| -| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | -| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) | -| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| -| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/) -| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | -| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) | -| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. 
| -| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | -| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | -| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) | -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [KafkaLogrus](https://github.com/tracer0tong/kafkalogrus) | Hook for logging to Kafka | -| [Kafka REST Proxy](https://github.com/Nordstrom/logrus-kafka-rest-proxy) | Hook for logging to [Kafka REST Proxy](https://docs.confluent.io/current/kafka-rest/docs) | -| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | -| [Logbeat](https://github.com/macandmia/logbeat) | Hook for logging to [Opbeat](https://opbeat.com/) | -| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) | -| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) | -| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | -| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | -| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) | -| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | -| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) | -| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | -| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | -| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) | -| [Promrus](https://github.com/weaveworks/promrus) | Expose number of log messages as [Prometheus](https://prometheus.io/) metrics | -| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) | -| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | -| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | -| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | -| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)| -| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. 
| -| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) | -| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| -| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. | -| [Telegram](https://github.com/rossmcdonald/telegram_hook) | Hook for logging errors to [Telegram](https://telegram.org/) | -| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) | -| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | -| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash | -| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) | - -#### Level logging - -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. - -```go -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields` some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `AddFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. For example, if your application has a global -variable `Environment`, which is a string representation of the environment you -could do: - -```go -import ( - log "github.com/sirupsen/logrus" -) - -init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(&log.JSONFormatter{}) - } else { - // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(&log.TextFormatter{}) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. 
To force no colored output even if there is a TTY set the - `DisableColors` field to `true`. For Windows, see - [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). - * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). -* `logrus.JSONFormatter`. Logs fields as JSON. - * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). - -Third party logging formatters: - -* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. -* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. -* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. -* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Logger as an `io.Writer` - -Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. - ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - -This means that we can override the standard library logger easily: - -```go -logger := logrus.New() -logger.Formatter = &logrus.JSONFormatter{} - -// Use logrus for standard log output -// Note that `log` here references stdlib's log -// Not logrus imported under the name `log`. -log.SetOutput(logger.Writer()) -``` - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. - -#### Tools - -| Tool | Description | -| ---- | ----------- | -|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| -|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). 
[sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | - -#### Testing - -Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: - -* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook -* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): - -```go -import( - "github.com/sirupsen/logrus" - "github.com/sirupsen/logrus/hooks/test" - "github.com/stretchr/testify/assert" - "testing" -) - -func TestSomething(t*testing.T){ - logger, hook := test.NewNullLogger() - logger.Error("Helloerror") - - assert.Equal(t, 1, len(hook.Entries)) - assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) - assert.Equal(t, "Helloerror", hook.LastEntry().Message) - - hook.Reset() - assert.Nil(t, hook.LastEntry()) -} -``` - -#### Fatal handlers - -Logrus can register one or more functions that will be called when any `fatal` -level message is logged. The registered handlers will be executed before -logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need -to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. - -``` -... -handler := func() { - // gracefully shutdown something... -} -logrus.RegisterExitHandler(handler) -... -``` - -#### Thread safety - -By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs. -If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. - -Situation when locking is not needed includes: - -* You have no hooks registered, or hooks calling is already thread-safe. - -* Writing to logger.Out is already thread-safe, for example: - - 1) logger.Out is protected by locks. - - 2) logger.Out is a os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allow multi-thread/multi-process writing) - - (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/sirupsen/logrus/alt_exit_test.go b/vendor/github.com/sirupsen/logrus/alt_exit_test.go deleted file mode 100644 index a08b1a89..00000000 --- a/vendor/github.com/sirupsen/logrus/alt_exit_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package logrus - -import ( - "io/ioutil" - "log" - "os" - "os/exec" - "path/filepath" - "testing" - "time" -) - -func TestRegister(t *testing.T) { - current := len(handlers) - RegisterExitHandler(func() {}) - if len(handlers) != current+1 { - t.Fatalf("expected %d handlers, got %d", current+1, len(handlers)) - } -} - -func TestHandler(t *testing.T) { - tempDir, err := ioutil.TempDir("", "test_handler") - if err != nil { - log.Fatalf("can't create temp dir. %q", err) - } - defer os.RemoveAll(tempDir) - - gofile := filepath.Join(tempDir, "gofile.go") - if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil { - t.Fatalf("can't create go file. %q", err) - } - - outfile := filepath.Join(tempDir, "outfile.out") - arg := time.Now().UTC().String() - err = exec.Command("go", "run", gofile, outfile, arg).Run() - if err == nil { - t.Fatalf("completed normally, should have failed") - } - - data, err := ioutil.ReadFile(outfile) - if err != nil { - t.Fatalf("can't read output file %s. %q", outfile, err) - } - - if string(data) != arg { - t.Fatalf("bad data. 
Expected %q, got %q", data, arg) - } -} - -var testprog = []byte(` -// Test program for atexit, gets output file and data as arguments and writes -// data to output file in atexit handler. -package main - -import ( - "github.com/sirupsen/logrus" - "flag" - "fmt" - "io/ioutil" -) - -var outfile = "" -var data = "" - -func handler() { - ioutil.WriteFile(outfile, []byte(data), 0666) -} - -func badHandler() { - n := 0 - fmt.Println(1/n) -} - -func main() { - flag.Parse() - outfile = flag.Arg(0) - data = flag.Arg(1) - - logrus.RegisterExitHandler(handler) - logrus.RegisterExitHandler(badHandler) - logrus.Fatal("Bye bye") -} -`) diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml deleted file mode 100644 index 96c2ce15..00000000 --- a/vendor/github.com/sirupsen/logrus/appveyor.yml +++ /dev/null @@ -1,14 +0,0 @@ -version: "{build}" -platform: x64 -clone_folder: c:\gopath\src\github.com\sirupsen\logrus -environment: - GOPATH: c:\gopath -branches: - only: - - master -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version -build_script: - - go get -t - - go test diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go index 778f4c9f..4efedddf 100644 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -41,14 +41,14 @@ type Entry struct { // Message passed to Debug, Info, Warn, Error, Fatal or Panic Message string - // When formatter is called in entry.log(), an Buffer may be set to entry + // When formatter is called in entry.log(), a Buffer may be set to entry Buffer *bytes.Buffer } func NewEntry(logger *Logger) *Entry { return &Entry{ Logger: logger, - // Default is three fields, give a little extra room + // Default is five fields, give a little extra room Data: make(Fields, 5), } } @@ -83,14 +83,28 @@ func (entry *Entry) WithFields(fields Fields) *Entry { for k, v := range fields { data[k] = v } - return &Entry{Logger: entry.Logger, Data: data} + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time} +} + +// Overrides the time of the Entry. +func (entry *Entry) WithTime(t time.Time) *Entry { + return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t} } // This function is not declared with a pointer value because otherwise // race conditions will occur when using multiple goroutines func (entry Entry) log(level Level, msg string) { var buffer *bytes.Buffer - entry.Time = time.Now() + + // Default to now, but allow users to override if they want. + // + // We don't have to worry about polluting future calls to Entry#log() + // with this assignment because this function is declared with a + // non-pointer receiver. 
+ if entry.Time.IsZero() { + entry.Time = time.Now() + } + entry.Level = level entry.Message = msg @@ -113,21 +127,19 @@ func (entry Entry) log(level Level, msg string) { } } -// This function is not declared with a pointer value because otherwise -// race conditions will occur when using multiple goroutines -func (entry Entry) fireHooks() { +func (entry *Entry) fireHooks() { entry.Logger.mu.Lock() defer entry.Logger.mu.Unlock() - err := entry.Logger.Hooks.Fire(entry.Level, &entry) + err := entry.Logger.Hooks.Fire(entry.Level, entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) } } func (entry *Entry) write() { - serialized, err := entry.Logger.Formatter.Format(entry) entry.Logger.mu.Lock() defer entry.Logger.mu.Unlock() + serialized, err := entry.Logger.Formatter.Format(entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) } else { @@ -139,7 +151,7 @@ func (entry *Entry) write() { } func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.level() >= DebugLevel { + if entry.Logger.IsLevelEnabled(DebugLevel) { entry.log(DebugLevel, fmt.Sprint(args...)) } } @@ -149,13 +161,13 @@ func (entry *Entry) Print(args ...interface{}) { } func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.level() >= InfoLevel { + if entry.Logger.IsLevelEnabled(InfoLevel) { entry.log(InfoLevel, fmt.Sprint(args...)) } } func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.level() >= WarnLevel { + if entry.Logger.IsLevelEnabled(WarnLevel) { entry.log(WarnLevel, fmt.Sprint(args...)) } } @@ -165,20 +177,20 @@ func (entry *Entry) Warning(args ...interface{}) { } func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.level() >= ErrorLevel { + if entry.Logger.IsLevelEnabled(ErrorLevel) { entry.log(ErrorLevel, fmt.Sprint(args...)) } } func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.level() >= FatalLevel { + if entry.Logger.IsLevelEnabled(FatalLevel) { entry.log(FatalLevel, fmt.Sprint(args...)) } Exit(1) } func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.level() >= PanicLevel { + if entry.Logger.IsLevelEnabled(PanicLevel) { entry.log(PanicLevel, fmt.Sprint(args...)) } panic(fmt.Sprint(args...)) @@ -187,13 +199,13 @@ func (entry *Entry) Panic(args ...interface{}) { // Entry Printf family functions func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.level() >= DebugLevel { + if entry.Logger.IsLevelEnabled(DebugLevel) { entry.Debug(fmt.Sprintf(format, args...)) } } func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.level() >= InfoLevel { + if entry.Logger.IsLevelEnabled(InfoLevel) { entry.Info(fmt.Sprintf(format, args...)) } } @@ -203,7 +215,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) { } func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.level() >= WarnLevel { + if entry.Logger.IsLevelEnabled(WarnLevel) { entry.Warn(fmt.Sprintf(format, args...)) } } @@ -213,20 +225,20 @@ func (entry *Entry) Warningf(format string, args ...interface{}) { } func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.level() >= ErrorLevel { + if entry.Logger.IsLevelEnabled(ErrorLevel) { entry.Error(fmt.Sprintf(format, args...)) } } func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.level() >= FatalLevel { + if entry.Logger.IsLevelEnabled(FatalLevel) { entry.Fatal(fmt.Sprintf(format, args...)) } Exit(1) } func (entry *Entry) Panicf(format 
string, args ...interface{}) { - if entry.Logger.level() >= PanicLevel { + if entry.Logger.IsLevelEnabled(PanicLevel) { entry.Panic(fmt.Sprintf(format, args...)) } } @@ -234,13 +246,13 @@ func (entry *Entry) Panicf(format string, args ...interface{}) { // Entry Println family functions func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.level() >= DebugLevel { + if entry.Logger.IsLevelEnabled(DebugLevel) { entry.Debug(entry.sprintlnn(args...)) } } func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.level() >= InfoLevel { + if entry.Logger.IsLevelEnabled(InfoLevel) { entry.Info(entry.sprintlnn(args...)) } } @@ -250,7 +262,7 @@ func (entry *Entry) Println(args ...interface{}) { } func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.level() >= WarnLevel { + if entry.Logger.IsLevelEnabled(WarnLevel) { entry.Warn(entry.sprintlnn(args...)) } } @@ -260,20 +272,20 @@ func (entry *Entry) Warningln(args ...interface{}) { } func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.level() >= ErrorLevel { + if entry.Logger.IsLevelEnabled(ErrorLevel) { entry.Error(entry.sprintlnn(args...)) } } func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.level() >= FatalLevel { + if entry.Logger.IsLevelEnabled(FatalLevel) { entry.Fatal(entry.sprintlnn(args...)) } Exit(1) } func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.level() >= PanicLevel { + if entry.Logger.IsLevelEnabled(PanicLevel) { entry.Panic(entry.sprintlnn(args...)) } } diff --git a/vendor/github.com/sirupsen/logrus/entry_test.go b/vendor/github.com/sirupsen/logrus/entry_test.go deleted file mode 100644 index a81e2b38..00000000 --- a/vendor/github.com/sirupsen/logrus/entry_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestEntryWithError(t *testing.T) { - - assert := assert.New(t) - - defer func() { - ErrorKey = "error" - }() - - err := fmt.Errorf("kaboom at layer %d", 4711) - - assert.Equal(err, WithError(err).Data["error"]) - - logger := New() - logger.Out = &bytes.Buffer{} - entry := NewEntry(logger) - - assert.Equal(err, entry.WithError(err).Data["error"]) - - ErrorKey = "err" - - assert.Equal(err, entry.WithError(err).Data["err"]) - -} - -func TestEntryPanicln(t *testing.T) { - errBoom := fmt.Errorf("boom time") - - defer func() { - p := recover() - assert.NotNil(t, p) - - switch pVal := p.(type) { - case *Entry: - assert.Equal(t, "kaboom", pVal.Message) - assert.Equal(t, errBoom, pVal.Data["err"]) - default: - t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) - } - }() - - logger := New() - logger.Out = &bytes.Buffer{} - entry := NewEntry(logger) - entry.WithField("err", errBoom).Panicln("kaboom") -} - -func TestEntryPanicf(t *testing.T) { - errBoom := fmt.Errorf("boom again") - - defer func() { - p := recover() - assert.NotNil(t, p) - - switch pVal := p.(type) { - case *Entry: - assert.Equal(t, "kaboom true", pVal.Message) - assert.Equal(t, errBoom, pVal.Data["err"]) - default: - t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) - } - }() - - logger := New() - logger.Out = &bytes.Buffer{} - entry := NewEntry(logger) - entry.WithField("err", errBoom).Panicf("kaboom %v", true) -} - -const ( - badMessage = "this is going to panic" - panicMessage = "this is broken" -) - -type panickyHook struct{} - -func (p *panickyHook) Levels() []Level { - return []Level{InfoLevel} -} - -func (p *panickyHook) Fire(entry *Entry) error { - if entry.Message == 
badMessage { - panic(panicMessage) - } - - return nil -} - -func TestEntryHooksPanic(t *testing.T) { - logger := New() - logger.Out = &bytes.Buffer{} - logger.Level = InfoLevel - logger.Hooks.Add(&panickyHook{}) - - defer func() { - p := recover() - assert.NotNil(t, p) - assert.Equal(t, panicMessage, p) - - entry := NewEntry(logger) - entry.Info("another message") - }() - - entry := NewEntry(logger) - entry.Info(badMessage) -} diff --git a/vendor/github.com/sirupsen/logrus/example_basic_test.go b/vendor/github.com/sirupsen/logrus/example_basic_test.go deleted file mode 100644 index a2acf550..00000000 --- a/vendor/github.com/sirupsen/logrus/example_basic_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package logrus_test - -import ( - "github.com/sirupsen/logrus" - "os" -) - -func Example_basic() { - var log = logrus.New() - log.Formatter = new(logrus.JSONFormatter) - log.Formatter = new(logrus.TextFormatter) //default - log.Formatter.(*logrus.TextFormatter).DisableTimestamp = true // remove timestamp from test output - log.Level = logrus.DebugLevel - log.Out = os.Stdout - - // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666) - // if err == nil { - // log.Out = file - // } else { - // log.Info("Failed to log to file, using default stderr") - // } - - defer func() { - err := recover() - if err != nil { - entry := err.(*logrus.Entry) - log.WithFields(logrus.Fields{ - "omg": true, - "err_animal": entry.Data["animal"], - "err_size": entry.Data["size"], - "err_level": entry.Level, - "err_message": entry.Message, - "number": 100, - }).Error("The ice breaks!") // or use Fatal() to force the process to exit with a nonzero code - } - }() - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "number": 8, - }).Debug("Started observing beach") - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(logrus.Fields{ - "temperature": -4, - }).Debug("Temperature changes") - - log.WithFields(logrus.Fields{ - "animal": "orca", - "size": 9009, - }).Panic("It's over 9000!") - - // Output: - // level=debug msg="Started observing beach" animal=walrus number=8 - // level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 - // level=warning msg="The group's number increased tremendously!" number=122 omg=true - // level=debug msg="Temperature changes" temperature=-4 - // level=panic msg="It's over 9000!" animal=orca size=9009 - // level=error msg="The ice breaks!" err_animal=orca err_level=panic err_message="It's over 9000!" 
err_size=9009 number=100 omg=true -} diff --git a/vendor/github.com/sirupsen/logrus/example_hook_test.go b/vendor/github.com/sirupsen/logrus/example_hook_test.go deleted file mode 100644 index d4ddffca..00000000 --- a/vendor/github.com/sirupsen/logrus/example_hook_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package logrus_test - -import ( - "github.com/sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" - "os" -) - -func Example_hook() { - var log = logrus.New() - log.Formatter = new(logrus.TextFormatter) // default - log.Formatter.(*logrus.TextFormatter).DisableTimestamp = true // remove timestamp from test output - log.Hooks.Add(airbrake.NewHook(123, "xyz", "development")) - log.Out = os.Stdout - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 100, - }).Error("The ice breaks!") - - // Output: - // level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 - // level=warning msg="The group's number increased tremendously!" number=122 omg=true - // level=error msg="The ice breaks!" number=100 omg=true -} diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go index 013183ed..fb2a7a1f 100644 --- a/vendor/github.com/sirupsen/logrus/exported.go +++ b/vendor/github.com/sirupsen/logrus/exported.go @@ -2,6 +2,7 @@ package logrus import ( "io" + "time" ) var ( @@ -15,37 +16,32 @@ func StandardLogger() *Logger { // SetOutput sets the standard logger output. func SetOutput(out io.Writer) { - std.mu.Lock() - defer std.mu.Unlock() - std.Out = out + std.SetOutput(out) } // SetFormatter sets the standard logger formatter. func SetFormatter(formatter Formatter) { - std.mu.Lock() - defer std.mu.Unlock() - std.Formatter = formatter + std.SetFormatter(formatter) } // SetLevel sets the standard logger level. func SetLevel(level Level) { - std.mu.Lock() - defer std.mu.Unlock() std.SetLevel(level) } // GetLevel returns the standard logger level. func GetLevel() Level { - std.mu.Lock() - defer std.mu.Unlock() - return std.level() + return std.GetLevel() +} + +// IsLevelEnabled checks if the log level of the standard logger is greater than the level param +func IsLevelEnabled(level Level) bool { + return std.IsLevelEnabled(level) } // AddHook adds a hook to the standard logger hooks. func AddHook(hook Hook) { - std.mu.Lock() - defer std.mu.Unlock() - std.Hooks.Add(hook) + std.AddHook(hook) } // WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. @@ -72,6 +68,15 @@ func WithFields(fields Fields) *Entry { return std.WithFields(fields) } +// WithTime creats an entry from the standard logger and overrides the time of +// logs generated with it. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithTime(t time.Time) *Entry { + return std.WithTime(t) +} + // Debug logs a message at level Debug on the standard logger. func Debug(args ...interface{}) { std.Debug(args...) @@ -107,7 +112,7 @@ func Panic(args ...interface{}) { std.Panic(args...) } -// Fatal logs a message at level Fatal on the standard logger. +// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. 
func Fatal(args ...interface{}) { std.Fatal(args...) } @@ -147,7 +152,7 @@ func Panicf(format string, args ...interface{}) { std.Panicf(format, args...) } -// Fatalf logs a message at level Fatal on the standard logger. +// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. func Fatalf(format string, args ...interface{}) { std.Fatalf(format, args...) } @@ -187,7 +192,7 @@ func Panicln(args ...interface{}) { std.Panicln(args...) } -// Fatalln logs a message at level Fatal on the standard logger. +// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. func Fatalln(args ...interface{}) { std.Fatalln(args...) } diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go index b183ff5b..83c74947 100644 --- a/vendor/github.com/sirupsen/logrus/formatter.go +++ b/vendor/github.com/sirupsen/logrus/formatter.go @@ -30,16 +30,22 @@ type Formatter interface { // // It's not exported because it's still using Data in an opinionated way. It's to // avoid code duplication between the two default formatters. -func prefixFieldClashes(data Fields) { - if t, ok := data["time"]; ok { - data["fields.time"] = t +func prefixFieldClashes(data Fields, fieldMap FieldMap) { + timeKey := fieldMap.resolve(FieldKeyTime) + if t, ok := data[timeKey]; ok { + data["fields."+timeKey] = t + delete(data, timeKey) } - if m, ok := data["msg"]; ok { - data["fields.msg"] = m + msgKey := fieldMap.resolve(FieldKeyMsg) + if m, ok := data[msgKey]; ok { + data["fields."+msgKey] = m + delete(data, msgKey) } - if l, ok := data["level"]; ok { - data["fields.level"] = l + levelKey := fieldMap.resolve(FieldKeyLevel) + if l, ok := data[levelKey]; ok { + data["fields."+levelKey] = l + delete(data, levelKey) } } diff --git a/vendor/github.com/sirupsen/logrus/formatter_bench_test.go b/vendor/github.com/sirupsen/logrus/formatter_bench_test.go deleted file mode 100644 index d9481589..00000000 --- a/vendor/github.com/sirupsen/logrus/formatter_bench_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package logrus - -import ( - "fmt" - "testing" - "time" -) - -// smallFields is a small size data set for benchmarking -var smallFields = Fields{ - "foo": "bar", - "baz": "qux", - "one": "two", - "three": "four", -} - -// largeFields is a large size data set for benchmarking -var largeFields = Fields{ - "foo": "bar", - "baz": "qux", - "one": "two", - "three": "four", - "five": "six", - "seven": "eight", - "nine": "ten", - "eleven": "twelve", - "thirteen": "fourteen", - "fifteen": "sixteen", - "seventeen": "eighteen", - "nineteen": "twenty", - "a": "b", - "c": "d", - "e": "f", - "g": "h", - "i": "j", - "k": "l", - "m": "n", - "o": "p", - "q": "r", - "s": "t", - "u": "v", - "w": "x", - "y": "z", - "this": "will", - "make": "thirty", - "entries": "yeah", -} - -var errorFields = Fields{ - "foo": fmt.Errorf("bar"), - "baz": fmt.Errorf("qux"), -} - -func BenchmarkErrorTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields) -} - -func BenchmarkSmallTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) -} - -func BenchmarkLargeTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) -} - -func BenchmarkSmallColoredTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) -} - -func BenchmarkLargeColoredTextFormatter(b *testing.B) { - doBenchmark(b, 
&TextFormatter{ForceColors: true}, largeFields) -} - -func BenchmarkSmallJSONFormatter(b *testing.B) { - doBenchmark(b, &JSONFormatter{}, smallFields) -} - -func BenchmarkLargeJSONFormatter(b *testing.B) { - doBenchmark(b, &JSONFormatter{}, largeFields) -} - -func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { - logger := New() - - entry := &Entry{ - Time: time.Time{}, - Level: InfoLevel, - Message: "message", - Data: fields, - Logger: logger, - } - var d []byte - var err error - for i := 0; i < b.N; i++ { - d, err = formatter.Format(entry) - if err != nil { - b.Fatal(err) - } - b.SetBytes(int64(len(d))) - } -} diff --git a/vendor/github.com/sirupsen/logrus/hook_test.go b/vendor/github.com/sirupsen/logrus/hook_test.go deleted file mode 100644 index 4fea7514..00000000 --- a/vendor/github.com/sirupsen/logrus/hook_test.go +++ /dev/null @@ -1,144 +0,0 @@ -package logrus - -import ( - "sync" - "testing" - - "github.com/stretchr/testify/assert" -) - -type TestHook struct { - Fired bool -} - -func (hook *TestHook) Fire(entry *Entry) error { - hook.Fired = true - return nil -} - -func (hook *TestHook) Levels() []Level { - return []Level{ - DebugLevel, - InfoLevel, - WarnLevel, - ErrorLevel, - FatalLevel, - PanicLevel, - } -} - -func TestHookFires(t *testing.T) { - hook := new(TestHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - assert.Equal(t, hook.Fired, false) - - log.Print("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, true) - }) -} - -type ModifyHook struct { -} - -func (hook *ModifyHook) Fire(entry *Entry) error { - entry.Data["wow"] = "whale" - return nil -} - -func (hook *ModifyHook) Levels() []Level { - return []Level{ - DebugLevel, - InfoLevel, - WarnLevel, - ErrorLevel, - FatalLevel, - PanicLevel, - } -} - -func TestHookCanModifyEntry(t *testing.T) { - hook := new(ModifyHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.WithField("wow", "elephant").Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["wow"], "whale") - }) -} - -func TestCanFireMultipleHooks(t *testing.T) { - hook1 := new(ModifyHook) - hook2 := new(TestHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook1) - log.Hooks.Add(hook2) - - log.WithField("wow", "elephant").Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["wow"], "whale") - assert.Equal(t, hook2.Fired, true) - }) -} - -type ErrorHook struct { - Fired bool -} - -func (hook *ErrorHook) Fire(entry *Entry) error { - hook.Fired = true - return nil -} - -func (hook *ErrorHook) Levels() []Level { - return []Level{ - ErrorLevel, - } -} - -func TestErrorHookShouldntFireOnInfo(t *testing.T) { - hook := new(ErrorHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.Info("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, false) - }) -} - -func TestErrorHookShouldFireOnError(t *testing.T) { - hook := new(ErrorHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.Error("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, true) - }) -} - -func TestAddHookRace(t *testing.T) { - var wg sync.WaitGroup - wg.Add(2) - hook := new(ErrorHook) - LogAndAssertJSON(t, func(log *Logger) { - go func() { - defer wg.Done() - log.AddHook(hook) - }() - go func() { - defer wg.Done() - log.Error("test") - }() - wg.Wait() - }, func(fields Fields) { - // the line may have been logged - // before the hook was added, so we can't - // actually assert on the hook - }) -} diff --git 
a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go index fb01c1b1..d3dadefe 100644 --- a/vendor/github.com/sirupsen/logrus/json_formatter.go +++ b/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -1,6 +1,7 @@ package logrus import ( + "bytes" "encoding/json" "fmt" ) @@ -33,6 +34,9 @@ type JSONFormatter struct { // DisableTimestamp allows disabling automatic timestamps in output DisableTimestamp bool + // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. + DataKey string + // FieldMap allows users to customize the names of keys for default fields. // As an example: // formatter := &JSONFormatter{ @@ -43,6 +47,9 @@ type JSONFormatter struct { // }, // } FieldMap FieldMap + + // PrettyPrint will indent all json logs + PrettyPrint bool } // Format renders a single log entry @@ -58,7 +65,14 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { data[k] = v } } - prefixFieldClashes(data) + + if f.DataKey != "" { + newData := make(Fields, 4) + newData[f.DataKey] = data + data = newData + } + + prefixFieldClashes(data, f.FieldMap) timestampFormat := f.TimestampFormat if timestampFormat == "" { @@ -71,9 +85,20 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() - serialized, err := json.Marshal(data) - if err != nil { + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + encoder := json.NewEncoder(b) + if f.PrettyPrint { + encoder.SetIndent("", " ") + } + if err := encoder.Encode(data); err != nil { return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) } - return append(serialized, '\n'), nil + + return b.Bytes(), nil } diff --git a/vendor/github.com/sirupsen/logrus/json_formatter_test.go b/vendor/github.com/sirupsen/logrus/json_formatter_test.go deleted file mode 100644 index 51093a79..00000000 --- a/vendor/github.com/sirupsen/logrus/json_formatter_test.go +++ /dev/null @@ -1,199 +0,0 @@ -package logrus - -import ( - "encoding/json" - "errors" - "strings" - "testing" -) - -func TestErrorNotLost(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["error"] != "wild walrus" { - t.Fatal("Error field not set") - } -} - -func TestErrorNotLostOnFieldNotNamedError(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["omg"] != "wild walrus" { - t.Fatal("Error field not set") - } -} - -func TestFieldClashWithTime(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("time", "right now!")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.time"] != "right now!" 
{ - t.Fatal("fields.time not set to original time field") - } - - if entry["time"] != "0001-01-01T00:00:00Z" { - t.Fatal("time field not set to current time, was: ", entry["time"]) - } -} - -func TestFieldClashWithMsg(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("msg", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.msg"] != "something" { - t.Fatal("fields.msg not set to original msg field") - } -} - -func TestFieldClashWithLevel(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("level", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.level"] != "something" { - t.Fatal("fields.level not set to original level field") - } -} - -func TestJSONEntryEndsWithNewline(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("level", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - if b[len(b)-1] != '\n' { - t.Fatal("Expected JSON log entry to end with a newline") - } -} - -func TestJSONMessageKey(t *testing.T) { - formatter := &JSONFormatter{ - FieldMap: FieldMap{ - FieldKeyMsg: "message", - }, - } - - b, err := formatter.Format(&Entry{Message: "oh hai"}) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - s := string(b) - if !(strings.Contains(s, "message") && strings.Contains(s, "oh hai")) { - t.Fatal("Expected JSON to format message key") - } -} - -func TestJSONLevelKey(t *testing.T) { - formatter := &JSONFormatter{ - FieldMap: FieldMap{ - FieldKeyLevel: "somelevel", - }, - } - - b, err := formatter.Format(WithField("level", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - s := string(b) - if !strings.Contains(s, "somelevel") { - t.Fatal("Expected JSON to format level key") - } -} - -func TestJSONTimeKey(t *testing.T) { - formatter := &JSONFormatter{ - FieldMap: FieldMap{ - FieldKeyTime: "timeywimey", - }, - } - - b, err := formatter.Format(WithField("level", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - s := string(b) - if !strings.Contains(s, "timeywimey") { - t.Fatal("Expected JSON to format time key") - } -} - -func TestJSONDisableTimestamp(t *testing.T) { - formatter := &JSONFormatter{ - DisableTimestamp: true, - } - - b, err := formatter.Format(WithField("level", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - s := string(b) - if strings.Contains(s, FieldKeyTime) { - t.Error("Did not prevent timestamp", s) - } -} - -func TestJSONEnableTimestamp(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("level", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - s := string(b) - if !strings.Contains(s, FieldKeyTime) { - t.Error("Timestamp not present", s) - } -} diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go index fdaf8a65..b67bfcbd 100644 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -5,12 +5,13 @@ import ( "os" "sync" "sync/atomic" + "time" ) type Logger 
struct { // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a // file, or leave it default which is `os.Stderr`. You can also set this to - // something more adventorous, such as logging to Kafka. + // something more adventurous, such as logging to Kafka. Out io.Writer // Hooks for the logger instance. These allow firing events based on logging // levels and log entries. For example, to send errors to an error tracking @@ -84,11 +85,12 @@ func (logger *Logger) newEntry() *Entry { } func (logger *Logger) releaseEntry(entry *Entry) { + entry.Data = map[string]interface{}{} logger.entryPool.Put(entry) } // Adds a field to the log entry, note that it doesn't log until you call -// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry. // If you want multiple fields, use `WithFields`. func (logger *Logger) WithField(key string, value interface{}) *Entry { entry := logger.newEntry() @@ -112,8 +114,15 @@ func (logger *Logger) WithError(err error) *Entry { return entry.WithError(err) } +// Overrides the time of the log entry. +func (logger *Logger) WithTime(t time.Time) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithTime(t) +} + func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.level() >= DebugLevel { + if logger.IsLevelEnabled(DebugLevel) { entry := logger.newEntry() entry.Debugf(format, args...) logger.releaseEntry(entry) @@ -121,7 +130,7 @@ func (logger *Logger) Debugf(format string, args ...interface{}) { } func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.level() >= InfoLevel { + if logger.IsLevelEnabled(InfoLevel) { entry := logger.newEntry() entry.Infof(format, args...) logger.releaseEntry(entry) @@ -135,7 +144,7 @@ func (logger *Logger) Printf(format string, args ...interface{}) { } func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.level() >= WarnLevel { + if logger.IsLevelEnabled(WarnLevel) { entry := logger.newEntry() entry.Warnf(format, args...) logger.releaseEntry(entry) @@ -143,7 +152,7 @@ func (logger *Logger) Warnf(format string, args ...interface{}) { } func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.level() >= WarnLevel { + if logger.IsLevelEnabled(WarnLevel) { entry := logger.newEntry() entry.Warnf(format, args...) logger.releaseEntry(entry) @@ -151,7 +160,7 @@ func (logger *Logger) Warningf(format string, args ...interface{}) { } func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.level() >= ErrorLevel { + if logger.IsLevelEnabled(ErrorLevel) { entry := logger.newEntry() entry.Errorf(format, args...) logger.releaseEntry(entry) @@ -159,7 +168,7 @@ func (logger *Logger) Errorf(format string, args ...interface{}) { } func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.level() >= FatalLevel { + if logger.IsLevelEnabled(FatalLevel) { entry := logger.newEntry() entry.Fatalf(format, args...) logger.releaseEntry(entry) @@ -168,7 +177,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) { } func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.level() >= PanicLevel { + if logger.IsLevelEnabled(PanicLevel) { entry := logger.newEntry() entry.Panicf(format, args...) 
logger.releaseEntry(entry) @@ -176,7 +185,7 @@ func (logger *Logger) Panicf(format string, args ...interface{}) { } func (logger *Logger) Debug(args ...interface{}) { - if logger.level() >= DebugLevel { + if logger.IsLevelEnabled(DebugLevel) { entry := logger.newEntry() entry.Debug(args...) logger.releaseEntry(entry) @@ -184,7 +193,7 @@ func (logger *Logger) Debug(args ...interface{}) { } func (logger *Logger) Info(args ...interface{}) { - if logger.level() >= InfoLevel { + if logger.IsLevelEnabled(InfoLevel) { entry := logger.newEntry() entry.Info(args...) logger.releaseEntry(entry) @@ -198,7 +207,7 @@ func (logger *Logger) Print(args ...interface{}) { } func (logger *Logger) Warn(args ...interface{}) { - if logger.level() >= WarnLevel { + if logger.IsLevelEnabled(WarnLevel) { entry := logger.newEntry() entry.Warn(args...) logger.releaseEntry(entry) @@ -206,7 +215,7 @@ func (logger *Logger) Warn(args ...interface{}) { } func (logger *Logger) Warning(args ...interface{}) { - if logger.level() >= WarnLevel { + if logger.IsLevelEnabled(WarnLevel) { entry := logger.newEntry() entry.Warn(args...) logger.releaseEntry(entry) @@ -214,7 +223,7 @@ func (logger *Logger) Warning(args ...interface{}) { } func (logger *Logger) Error(args ...interface{}) { - if logger.level() >= ErrorLevel { + if logger.IsLevelEnabled(ErrorLevel) { entry := logger.newEntry() entry.Error(args...) logger.releaseEntry(entry) @@ -222,7 +231,7 @@ func (logger *Logger) Error(args ...interface{}) { } func (logger *Logger) Fatal(args ...interface{}) { - if logger.level() >= FatalLevel { + if logger.IsLevelEnabled(FatalLevel) { entry := logger.newEntry() entry.Fatal(args...) logger.releaseEntry(entry) @@ -231,7 +240,7 @@ func (logger *Logger) Fatal(args ...interface{}) { } func (logger *Logger) Panic(args ...interface{}) { - if logger.level() >= PanicLevel { + if logger.IsLevelEnabled(PanicLevel) { entry := logger.newEntry() entry.Panic(args...) logger.releaseEntry(entry) @@ -239,7 +248,7 @@ func (logger *Logger) Panic(args ...interface{}) { } func (logger *Logger) Debugln(args ...interface{}) { - if logger.level() >= DebugLevel { + if logger.IsLevelEnabled(DebugLevel) { entry := logger.newEntry() entry.Debugln(args...) logger.releaseEntry(entry) @@ -247,7 +256,7 @@ func (logger *Logger) Debugln(args ...interface{}) { } func (logger *Logger) Infoln(args ...interface{}) { - if logger.level() >= InfoLevel { + if logger.IsLevelEnabled(InfoLevel) { entry := logger.newEntry() entry.Infoln(args...) logger.releaseEntry(entry) @@ -261,7 +270,7 @@ func (logger *Logger) Println(args ...interface{}) { } func (logger *Logger) Warnln(args ...interface{}) { - if logger.level() >= WarnLevel { + if logger.IsLevelEnabled(WarnLevel) { entry := logger.newEntry() entry.Warnln(args...) logger.releaseEntry(entry) @@ -269,7 +278,7 @@ func (logger *Logger) Warnln(args ...interface{}) { } func (logger *Logger) Warningln(args ...interface{}) { - if logger.level() >= WarnLevel { + if logger.IsLevelEnabled(WarnLevel) { entry := logger.newEntry() entry.Warnln(args...) logger.releaseEntry(entry) @@ -277,7 +286,7 @@ func (logger *Logger) Warningln(args ...interface{}) { } func (logger *Logger) Errorln(args ...interface{}) { - if logger.level() >= ErrorLevel { + if logger.IsLevelEnabled(ErrorLevel) { entry := logger.newEntry() entry.Errorln(args...) 
logger.releaseEntry(entry) @@ -285,7 +294,7 @@ func (logger *Logger) Errorln(args ...interface{}) { } func (logger *Logger) Fatalln(args ...interface{}) { - if logger.level() >= FatalLevel { + if logger.IsLevelEnabled(FatalLevel) { entry := logger.newEntry() entry.Fatalln(args...) logger.releaseEntry(entry) @@ -294,7 +303,7 @@ func (logger *Logger) Fatalln(args ...interface{}) { } func (logger *Logger) Panicln(args ...interface{}) { - if logger.level() >= PanicLevel { + if logger.IsLevelEnabled(PanicLevel) { entry := logger.newEntry() entry.Panicln(args...) logger.releaseEntry(entry) @@ -312,12 +321,47 @@ func (logger *Logger) level() Level { return Level(atomic.LoadUint32((*uint32)(&logger.Level))) } +// SetLevel sets the logger level. func (logger *Logger) SetLevel(level Level) { atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) } +// GetLevel returns the logger level. +func (logger *Logger) GetLevel() Level { + return logger.level() +} + +// AddHook adds a hook to the logger hooks. func (logger *Logger) AddHook(hook Hook) { logger.mu.Lock() defer logger.mu.Unlock() logger.Hooks.Add(hook) } + +// IsLevelEnabled checks if the log level of the logger is greater than the level param +func (logger *Logger) IsLevelEnabled(level Level) bool { + return logger.level() >= level +} + +// SetFormatter sets the logger formatter. +func (logger *Logger) SetFormatter(formatter Formatter) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Formatter = formatter +} + +// SetOutput sets the logger output. +func (logger *Logger) SetOutput(output io.Writer) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Out = output +} + +// ReplaceHooks replaces the logger hooks and returns the old ones +func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { + logger.mu.Lock() + oldHooks := logger.Hooks + logger.Hooks = hooks + logger.mu.Unlock() + return oldHooks +} diff --git a/vendor/github.com/sirupsen/logrus/logger_bench_test.go b/vendor/github.com/sirupsen/logrus/logger_bench_test.go deleted file mode 100644 index dd23a353..00000000 --- a/vendor/github.com/sirupsen/logrus/logger_bench_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package logrus - -import ( - "os" - "testing" -) - -// smallFields is a small size data set for benchmarking -var loggerFields = Fields{ - "foo": "bar", - "baz": "qux", - "one": "two", - "three": "four", -} - -func BenchmarkDummyLogger(b *testing.B) { - nullf, err := os.OpenFile("/dev/null", os.O_WRONLY, 0666) - if err != nil { - b.Fatalf("%v", err) - } - defer nullf.Close() - doLoggerBenchmark(b, nullf, &TextFormatter{DisableColors: true}, smallFields) -} - -func BenchmarkDummyLoggerNoLock(b *testing.B) { - nullf, err := os.OpenFile("/dev/null", os.O_WRONLY|os.O_APPEND, 0666) - if err != nil { - b.Fatalf("%v", err) - } - defer nullf.Close() - doLoggerBenchmarkNoLock(b, nullf, &TextFormatter{DisableColors: true}, smallFields) -} - -func doLoggerBenchmark(b *testing.B, out *os.File, formatter Formatter, fields Fields) { - logger := Logger{ - Out: out, - Level: InfoLevel, - Formatter: formatter, - } - entry := logger.WithFields(fields) - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - entry.Info("aaa") - } - }) -} - -func doLoggerBenchmarkNoLock(b *testing.B, out *os.File, formatter Formatter, fields Fields) { - logger := Logger{ - Out: out, - Level: InfoLevel, - Formatter: formatter, - } - logger.SetNoLock() - entry := logger.WithFields(fields) - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - entry.Info("aaa") - } - }) -} diff --git 
a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go index dd389997..fa0b9dea 100644 --- a/vendor/github.com/sirupsen/logrus/logrus.go +++ b/vendor/github.com/sirupsen/logrus/logrus.go @@ -140,4 +140,11 @@ type FieldLogger interface { Errorln(args ...interface{}) Fatalln(args ...interface{}) Panicln(args ...interface{}) + + // IsDebugEnabled() bool + // IsInfoEnabled() bool + // IsWarnEnabled() bool + // IsErrorEnabled() bool + // IsFatalEnabled() bool + // IsPanicEnabled() bool } diff --git a/vendor/github.com/sirupsen/logrus/logrus_test.go b/vendor/github.com/sirupsen/logrus/logrus_test.go deleted file mode 100644 index 78cbc282..00000000 --- a/vendor/github.com/sirupsen/logrus/logrus_test.go +++ /dev/null @@ -1,386 +0,0 @@ -package logrus - -import ( - "bytes" - "encoding/json" - "strconv" - "strings" - "sync" - "testing" - - "github.com/stretchr/testify/assert" -) - -func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - log(logger) - - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - assertions(fields) -} - -func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { - var buffer bytes.Buffer - - logger := New() - logger.Out = &buffer - logger.Formatter = &TextFormatter{ - DisableColors: true, - } - - log(logger) - - fields := make(map[string]string) - for _, kv := range strings.Split(buffer.String(), " ") { - if !strings.Contains(kv, "=") { - continue - } - kvArr := strings.Split(kv, "=") - key := strings.TrimSpace(kvArr[0]) - val := kvArr[1] - if kvArr[1][0] == '"' { - var err error - val, err = strconv.Unquote(val) - assert.NoError(t, err) - } - fields[key] = val - } - assertions(fields) -} - -func TestPrint(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "info") - }) -} - -func TestInfo(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "info") - }) -} - -func TestWarn(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Warn("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "warning") - }) -} - -func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln("test", "test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test test") - }) -} - -func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln("test", 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test 10") - }) -} - -func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln(10, 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "10 10") - }) -} - -func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln(10, 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "10 10") - }) -} - -func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test", 10) - }, func(fields 
Fields) { - assert.Equal(t, fields["msg"], "test10") - }) -} - -func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test", "test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "testtest") - }) -} - -func TestWithFieldsShouldAllowAssignments(t *testing.T) { - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - localLog := logger.WithFields(Fields{ - "key1": "value1", - }) - - localLog.WithField("key2", "value2").Info("test") - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - assert.Equal(t, "value2", fields["key2"]) - assert.Equal(t, "value1", fields["key1"]) - - buffer = bytes.Buffer{} - fields = Fields{} - localLog.Info("test") - err = json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - _, ok := fields["key2"] - assert.Equal(t, false, ok) - assert.Equal(t, "value1", fields["key1"]) -} - -func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("msg", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - }) -} - -func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("msg", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["fields.msg"], "hello") - }) -} - -func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("time", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["fields.time"], "hello") - }) -} - -func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("level", 1).Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["level"], "info") - assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only - }) -} - -func TestDefaultFieldsAreNotPrefixed(t *testing.T) { - LogAndAssertText(t, func(log *Logger) { - ll := log.WithField("herp", "derp") - ll.Info("hello") - ll.Info("bye") - }, func(fields map[string]string) { - for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { - if _, ok := fields[fieldName]; ok { - t.Fatalf("should not have prefixed %q: %v", fieldName, fields) - } - } - }) -} - -func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { - - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - llog := logger.WithField("context", "eating raw fish") - - llog.Info("looks delicious") - - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.NoError(t, err, "should have decoded first message") - assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") - assert.Equal(t, fields["msg"], "looks delicious") - assert.Equal(t, fields["context"], "eating raw fish") - - buffer.Reset() - - llog.Warn("omg it is!") - - err = json.Unmarshal(buffer.Bytes(), &fields) - assert.NoError(t, err, "should have decoded second message") - assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") - assert.Equal(t, fields["msg"], "omg it is!") - assert.Equal(t, fields["context"], "eating raw fish") - assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") - -} - -func TestConvertLevelToString(t *testing.T) { - assert.Equal(t, "debug", 
DebugLevel.String()) - assert.Equal(t, "info", InfoLevel.String()) - assert.Equal(t, "warning", WarnLevel.String()) - assert.Equal(t, "error", ErrorLevel.String()) - assert.Equal(t, "fatal", FatalLevel.String()) - assert.Equal(t, "panic", PanicLevel.String()) -} - -func TestParseLevel(t *testing.T) { - l, err := ParseLevel("panic") - assert.Nil(t, err) - assert.Equal(t, PanicLevel, l) - - l, err = ParseLevel("PANIC") - assert.Nil(t, err) - assert.Equal(t, PanicLevel, l) - - l, err = ParseLevel("fatal") - assert.Nil(t, err) - assert.Equal(t, FatalLevel, l) - - l, err = ParseLevel("FATAL") - assert.Nil(t, err) - assert.Equal(t, FatalLevel, l) - - l, err = ParseLevel("error") - assert.Nil(t, err) - assert.Equal(t, ErrorLevel, l) - - l, err = ParseLevel("ERROR") - assert.Nil(t, err) - assert.Equal(t, ErrorLevel, l) - - l, err = ParseLevel("warn") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("WARN") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("warning") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("WARNING") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("info") - assert.Nil(t, err) - assert.Equal(t, InfoLevel, l) - - l, err = ParseLevel("INFO") - assert.Nil(t, err) - assert.Equal(t, InfoLevel, l) - - l, err = ParseLevel("debug") - assert.Nil(t, err) - assert.Equal(t, DebugLevel, l) - - l, err = ParseLevel("DEBUG") - assert.Nil(t, err) - assert.Equal(t, DebugLevel, l) - - l, err = ParseLevel("invalid") - assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) -} - -func TestGetSetLevelRace(t *testing.T) { - wg := sync.WaitGroup{} - for i := 0; i < 100; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - if i%2 == 0 { - SetLevel(InfoLevel) - } else { - GetLevel() - } - }(i) - - } - wg.Wait() -} - -func TestLoggingRace(t *testing.T) { - logger := New() - - var wg sync.WaitGroup - wg.Add(100) - - for i := 0; i < 100; i++ { - go func() { - logger.Info("info") - wg.Done() - }() - } - wg.Wait() -} - -// Compile test -func TestLogrusInterface(t *testing.T) { - var buffer bytes.Buffer - fn := func(l FieldLogger) { - b := l.WithField("key", "value") - b.Debug("Test") - } - // test logger - logger := New() - logger.Out = &buffer - fn(logger) - - // test Entry - e := logger.WithField("another", "value") - fn(e) -} - -// Implements io.Writer using channels for synchronization, so we can wait on -// the Entry.Writer goroutine to write in a non-racey way. This does assume that -// there is a single call to Logger.Out for each message. -type channelWriter chan []byte - -func (cw channelWriter) Write(p []byte) (int, error) { - cw <- p - return len(p), nil -} - -func TestEntryWriter(t *testing.T) { - cw := channelWriter(make(chan []byte, 1)) - log := New() - log.Out = cw - log.Formatter = new(JSONFormatter) - log.WithField("foo", "bar").WriterLevel(WarnLevel).Write([]byte("hello\n")) - - bs := <-cw - var fields Fields - err := json.Unmarshal(bs, &fields) - assert.Nil(t, err) - assert.Equal(t, fields["foo"], "bar") - assert.Equal(t, fields["level"], "warning") -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_appengine.go new file mode 100644 index 00000000..72f679cd --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_appengine.go @@ -0,0 +1,13 @@ +// Based on ssh/terminal: +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +package logrus + +import "io" + +func initTerminal(w io.Writer) { +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_bsd.go index 4880d13d..62ca252d 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_bsd.go +++ b/vendor/github.com/sirupsen/logrus/terminal_bsd.go @@ -1,10 +1,17 @@ // +build darwin freebsd openbsd netbsd dragonfly -// +build !appengine,!gopherjs +// +build !appengine,!js package logrus -import "golang.org/x/sys/unix" +import ( + "io" + + "golang.org/x/sys/unix" +) const ioctlReadTermios = unix.TIOCGETA type Termios unix.Termios + +func initTerminal(w io.Writer) { +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go index 3de08e80..2403de98 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go @@ -1,4 +1,4 @@ -// +build appengine gopherjs +// +build appengine package logrus diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go new file mode 100644 index 00000000..0c209750 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_js.go @@ -0,0 +1,11 @@ +// +build js + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go index 067047a1..cf309d6f 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go @@ -1,4 +1,4 @@ -// +build !appengine,!gopherjs +// +build !appengine,!js,!windows package logrus diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go new file mode 100644 index 00000000..3b9d2864 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go @@ -0,0 +1,20 @@ +// +build !appengine,!js,windows + +package logrus + +import ( + "io" + "os" + "syscall" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + var mode uint32 + err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode) + return err == nil + default: + return false + } +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_linux.go b/vendor/github.com/sirupsen/logrus/terminal_linux.go index f29a0097..18066f08 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_linux.go +++ b/vendor/github.com/sirupsen/logrus/terminal_linux.go @@ -3,12 +3,19 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !appengine,!gopherjs +// +build !appengine,!js package logrus -import "golang.org/x/sys/unix" +import ( + "io" + + "golang.org/x/sys/unix" +) const ioctlReadTermios = unix.TCGETS type Termios unix.Termios + +func initTerminal(w io.Writer) { +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_windows.go b/vendor/github.com/sirupsen/logrus/terminal_windows.go new file mode 100644 index 00000000..b4ef5286 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_windows.go @@ -0,0 +1,18 @@ +// +build !appengine,!js,windows + +package logrus + +import ( + "io" + "os" + "syscall" + + sequences "github.com/konsorten/go-windows-terminal-sequences" +) + +func initTerminal(w io.Writer) { + switch v := w.(type) { + case *os.File: + sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true) + } +} diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go index 61b21cae..67fb686c 100644 --- a/vendor/github.com/sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -3,6 +3,7 @@ package logrus import ( "bytes" "fmt" + "os" "sort" "strings" "sync" @@ -20,6 +21,7 @@ const ( var ( baseTimestamp time.Time + emptyFieldMap FieldMap ) func init() { @@ -34,6 +36,9 @@ type TextFormatter struct { // Force disabling colors. DisableColors bool + // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/ + EnvironmentOverrideColors bool + // Disable timestamp logging. useful when output is redirected to logging // system that already adds timestamps. DisableTimestamp bool @@ -50,60 +55,119 @@ type TextFormatter struct { // be desired. DisableSorting bool + // The keys sorting function, when uninitialized it uses sort.Strings. + SortingFunc func([]string) + + // Disables the truncation of the level text to 4 characters. + DisableLevelTruncation bool + // QuoteEmptyFields will wrap empty fields in quotes if true QuoteEmptyFields bool // Whether the logger's out is to a terminal isTerminal bool - sync.Once + // FieldMap allows users to customize the names of keys for default fields. 
+ // As an example: + // formatter := &TextFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message"}} + FieldMap FieldMap + + terminalInitOnce sync.Once } func (f *TextFormatter) init(entry *Entry) { if entry.Logger != nil { f.isTerminal = checkIfTerminal(entry.Logger.Out) + + if f.isTerminal { + initTerminal(entry.Logger.Out) + } + } +} + +func (f *TextFormatter) isColored() bool { + isColored := f.ForceColors || f.isTerminal + + if f.EnvironmentOverrideColors { + if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" { + isColored = true + } else if ok && force == "0" { + isColored = false + } else if os.Getenv("CLICOLOR") == "0" { + isColored = false + } } + + return isColored && !f.DisableColors } // Format renders a single log entry func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - var b *bytes.Buffer + prefixFieldClashes(entry.Data, f.FieldMap) + keys := make([]string, 0, len(entry.Data)) for k := range entry.Data { keys = append(keys, k) } + fixedKeys := make([]string, 0, 3+len(entry.Data)) + if !f.DisableTimestamp { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) + } + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel)) + if entry.Message != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) + } + if !f.DisableSorting { - sort.Strings(keys) + if f.SortingFunc == nil { + sort.Strings(keys) + fixedKeys = append(fixedKeys, keys...) + } else { + if !f.isColored() { + fixedKeys = append(fixedKeys, keys...) + f.SortingFunc(fixedKeys) + } else { + f.SortingFunc(keys) + } + } + } else { + fixedKeys = append(fixedKeys, keys...) } + + var b *bytes.Buffer if entry.Buffer != nil { b = entry.Buffer } else { b = &bytes.Buffer{} } - prefixFieldClashes(entry.Data) - - f.Do(func() { f.init(entry) }) - - isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors + f.terminalInitOnce.Do(func() { f.init(entry) }) timestampFormat := f.TimestampFormat if timestampFormat == "" { timestampFormat = defaultTimestampFormat } - if isColored { + if f.isColored() { f.printColored(b, entry, keys, timestampFormat) } else { - if !f.DisableTimestamp { - f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) - } - f.appendKeyValue(b, "level", entry.Level.String()) - if entry.Message != "" { - f.appendKeyValue(b, "msg", entry.Message) - } - for _, key := range keys { - f.appendKeyValue(b, key, entry.Data[key]) + for _, key := range fixedKeys { + var value interface{} + switch key { + case f.FieldMap.resolve(FieldKeyTime): + value = entry.Time.Format(timestampFormat) + case f.FieldMap.resolve(FieldKeyLevel): + value = entry.Level.String() + case f.FieldMap.resolve(FieldKeyMsg): + value = entry.Message + default: + value = entry.Data[key] + } + f.appendKeyValue(b, key, value) } } @@ -124,7 +188,14 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin levelColor = blue } - levelText := strings.ToUpper(entry.Level.String())[0:4] + levelText := strings.ToUpper(entry.Level.String()) + if !f.DisableLevelTruncation { + levelText = levelText[0:4] + } + + // Remove a single newline if it already exists in the message to keep + // the behavior of logrus text_formatter the same as the stdlib log package + entry.Message = strings.TrimSuffix(entry.Message, "\n") if f.DisableTimestamp { fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) diff --git a/vendor/github.com/sirupsen/logrus/text_formatter_test.go 
b/vendor/github.com/sirupsen/logrus/text_formatter_test.go deleted file mode 100644 index d93b931e..00000000 --- a/vendor/github.com/sirupsen/logrus/text_formatter_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package logrus - -import ( - "bytes" - "errors" - "fmt" - "strings" - "testing" - "time" -) - -func TestFormatting(t *testing.T) { - tf := &TextFormatter{DisableColors: true} - - testCases := []struct { - value string - expected string - }{ - {`foo`, "time=\"0001-01-01T00:00:00Z\" level=panic test=foo\n"}, - } - - for _, tc := range testCases { - b, _ := tf.Format(WithField("test", tc.value)) - - if string(b) != tc.expected { - t.Errorf("formatting expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected) - } - } -} - -func TestQuoting(t *testing.T) { - tf := &TextFormatter{DisableColors: true} - - checkQuoting := func(q bool, value interface{}) { - b, _ := tf.Format(WithField("test", value)) - idx := bytes.Index(b, ([]byte)("test=")) - cont := bytes.Contains(b[idx+5:], []byte("\"")) - if cont != q { - if q { - t.Errorf("quoting expected for: %#v", value) - } else { - t.Errorf("quoting not expected for: %#v", value) - } - } - } - - checkQuoting(false, "") - checkQuoting(false, "abcd") - checkQuoting(false, "v1.0") - checkQuoting(false, "1234567890") - checkQuoting(false, "/foobar") - checkQuoting(false, "foo_bar") - checkQuoting(false, "foo@bar") - checkQuoting(false, "foobar^") - checkQuoting(false, "+/-_^@f.oobar") - checkQuoting(true, "foobar$") - checkQuoting(true, "&foobar") - checkQuoting(true, "x y") - checkQuoting(true, "x,y") - checkQuoting(false, errors.New("invalid")) - checkQuoting(true, errors.New("invalid argument")) - - // Test for quoting empty fields. - tf.QuoteEmptyFields = true - checkQuoting(true, "") - checkQuoting(false, "abcd") - checkQuoting(true, errors.New("invalid argument")) -} - -func TestEscaping(t *testing.T) { - tf := &TextFormatter{DisableColors: true} - - testCases := []struct { - value string - expected string - }{ - {`ba"r`, `ba\"r`}, - {`ba'r`, `ba'r`}, - } - - for _, tc := range testCases { - b, _ := tf.Format(WithField("test", tc.value)) - if !bytes.Contains(b, []byte(tc.expected)) { - t.Errorf("escaping expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected) - } - } -} - -func TestEscaping_Interface(t *testing.T) { - tf := &TextFormatter{DisableColors: true} - - ts := time.Now() - - testCases := []struct { - value interface{} - expected string - }{ - {ts, fmt.Sprintf("\"%s\"", ts.String())}, - {errors.New("error: something went wrong"), "\"error: something went wrong\""}, - } - - for _, tc := range testCases { - b, _ := tf.Format(WithField("test", tc.value)) - if !bytes.Contains(b, []byte(tc.expected)) { - t.Errorf("escaping expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected) - } - } -} - -func TestTimestampFormat(t *testing.T) { - checkTimeStr := func(format string) { - customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format} - customStr, _ := customFormatter.Format(WithField("test", "test")) - timeStart := bytes.Index(customStr, ([]byte)("time=")) - timeEnd := bytes.Index(customStr, ([]byte)("level=")) - timeStr := customStr[timeStart+5+len("\"") : timeEnd-1-len("\"")] - if format == "" { - format = time.RFC3339 - } - _, e := time.Parse(format, (string)(timeStr)) - if e != nil { - t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e) - } - } - - checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") - 
checkTimeStr("Mon Jan _2 15:04:05 2006") - checkTimeStr("") -} - -func TestDisableTimestampWithColoredOutput(t *testing.T) { - tf := &TextFormatter{DisableTimestamp: true, ForceColors: true} - - b, _ := tf.Format(WithField("test", "test")) - if strings.Contains(string(b), "[0000]") { - t.Error("timestamp not expected when DisableTimestamp is true") - } -} - -// TODO add tests for sorting etc., this requires a parser for the text -// formatter output. diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore deleted file mode 100644 index 1b8c7c26..00000000 --- a/vendor/github.com/spf13/cobra/.gitignore +++ /dev/null @@ -1,36 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore -# swap -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -# session -Session.vim -# temporary -.netrwhist -*~ -# auto-generated tag files -tags - -*.exe - -cobra.test diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap deleted file mode 100644 index 94ec5306..00000000 --- a/vendor/github.com/spf13/cobra/.mailmap +++ /dev/null @@ -1,3 +0,0 @@ -Steve Francia -Bjørn Erik Pedersen -Fabiano Franz diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml deleted file mode 100644 index 5afcb209..00000000 --- a/vendor/github.com/spf13/cobra/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: go - -matrix: - include: - - go: 1.9.4 - - go: 1.10.0 - - go: tip - allow_failures: - - go: tip - -before_install: - - mkdir -p bin - - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.4.3/shellcheck - - chmod +x bin/shellcheck -script: - - PATH=$PATH:$PWD/bin go test -v ./... - - go build - - diff -u <(echo -n) <(gofmt -d -s .) - - if [ -z $NOVET ]; then - diff -u <(echo -n) <(go tool vet . 
2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint'); - fi diff --git a/vendor/github.com/spf13/cobra/BUILD.bazel b/vendor/github.com/spf13/cobra/BUILD.bazel deleted file mode 100644 index 5d2b01c9..00000000 --- a/vendor/github.com/spf13/cobra/BUILD.bazel +++ /dev/null @@ -1,71 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "args.go", - "bash_completions.go", - "cobra.go", - "command.go", - "zsh_completions.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "command_notwin.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "command_notwin.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "command_notwin.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "command_notwin.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "command_notwin.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "command_notwin.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "command_notwin.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "command_notwin.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "command_notwin.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "command_notwin.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "command_win.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/spf13/cobra", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/spf13/pflag:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/inconshreveable/mousetrap:go_default_library", - ], - "//conditions:default": [], - }), -) - -go_test( - name = "go_default_test", - srcs = [ - "args_test.go", - "bash_completions_test.go", - "cobra_test.go", - "command_test.go", - "zsh_completions_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/spf13/cobra", - deps = ["//vendor/github.com/spf13/pflag:go_default_library"], -) diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md deleted file mode 100644 index 851fcc08..00000000 --- a/vendor/github.com/spf13/cobra/README.md +++ /dev/null @@ -1,736 +0,0 @@ -![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png) - -Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files. 
- -Many of the most widely used Go projects are built using Cobra including: - -* [Kubernetes](http://kubernetes.io/) -* [Hugo](http://gohugo.io) -* [rkt](https://github.com/coreos/rkt) -* [etcd](https://github.com/coreos/etcd) -* [Moby (former Docker)](https://github.com/moby/moby) -* [Docker (distribution)](https://github.com/docker/distribution) -* [OpenShift](https://www.openshift.com/) -* [Delve](https://github.com/derekparker/delve) -* [GopherJS](http://www.gopherjs.org/) -* [CockroachDB](http://www.cockroachlabs.com/) -* [Bleve](http://www.blevesearch.com/) -* [ProjectAtomic (enterprise)](http://www.projectatomic.io/) -* [GiantSwarm's swarm](https://github.com/giantswarm/cli) -* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) -* [rclone](http://rclone.org/) -* [nehm](https://github.com/bogem/nehm) -* [Pouch](https://github.com/alibaba/pouch) - -[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) -[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra) -[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) - -# Table of Contents - -- [Overview](#overview) -- [Concepts](#concepts) - * [Commands](#commands) - * [Flags](#flags) -- [Installing](#installing) -- [Getting Started](#getting-started) - * [Using the Cobra Generator](#using-the-cobra-generator) - * [Using the Cobra Library](#using-the-cobra-library) - * [Working with Flags](#working-with-flags) - * [Positional and Custom Arguments](#positional-and-custom-arguments) - * [Example](#example) - * [Help Command](#help-command) - * [Usage Message](#usage-message) - * [PreRun and PostRun Hooks](#prerun-and-postrun-hooks) - * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens) - * [Generating documentation for your command](#generating-documentation-for-your-command) - * [Generating bash completions](#generating-bash-completions) -- [Contributing](#contributing) -- [License](#license) - -# Overview - -Cobra is a library providing a simple interface to create powerful modern CLI -interfaces similar to git & go tools. - -Cobra is also an application that will generate your application scaffolding to rapidly -develop a Cobra-based application. - -Cobra provides: -* Easy subcommand-based CLIs: `app server`, `app fetch`, etc. -* Fully POSIX-compliant flags (including short & long versions) -* Nested subcommands -* Global, local and cascading flags -* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname` -* Intelligent suggestions (`app srver`... did you mean `app server`?) -* Automatic help generation for commands and flags -* Automatic help flag recognition of `-h`, `--help`, etc. -* Automatically generated bash autocomplete for your application -* Automatically generated man pages for your application -* Command aliases so you can change things without breaking them -* The flexibility to define your own help, usage, etc. -* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps - -# Concepts - -Cobra is built on a structure of commands, arguments & flags. - -**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions. - -The best applications will read like sentences when used. 
Users will know how -to use the application because they will natively understand how to use it. - -The pattern to follow is -`APPNAME VERB NOUN --ADJECTIVE.` - or -`APPNAME COMMAND ARG --FLAG` - -A few good real world examples may better illustrate this point. - -In the following example, 'server' is a command, and 'port' is a flag: - - hugo server --port=1313 - -In this command we are telling Git to clone the url bare. - - git clone URL --bare - -## Commands - -Command is the central point of the application. Each interaction that -the application supports will be contained in a Command. A command can -have children commands and optionally run an action. - -In the example above, 'server' is the command. - -[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command) - -## Flags - -A flag is a way to modify the behavior of a command. Cobra supports -fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/). -A Cobra command can define flags that persist through to children commands -and flags that are only available to that command. - -In the example above, 'port' is the flag. - -Flag functionality is provided by the [pflag -library](https://github.com/spf13/pflag), a fork of the flag standard library -which maintains the same interface while adding POSIX compliance. - -# Installing -Using Cobra is easy. First, use `go get` to install the latest version -of the library. This command will install the `cobra` generator executable -along with the library and its dependencies: - - go get -u github.com/spf13/cobra/cobra - -Next, include Cobra in your application: - -```go -import "github.com/spf13/cobra" -``` - -# Getting Started - -While you are welcome to provide your own organization, typically a Cobra-based -application will follow the following organizational structure: - -``` - ▾ appName/ - ▾ cmd/ - add.go - your.go - commands.go - here.go - main.go -``` - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. - -```go -package main - -import ( - "fmt" - "os" - - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -## Using the Cobra Generator - -Cobra provides its own program that will create your application and add any -commands you want. It's the easiest way to incorporate Cobra into your application. - -[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it. - -## Using the Cobra Library - -To manually implement Cobra you need to create a bare main.go file and a rootCmd file. -You will optionally provide additional commands as you see fit. - -### Create rootCmd - -Cobra doesn't require any special constructors. Simply create your commands. - -Ideally you place this in app/cmd/root.go: - -```go -var rootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at http://hugo.spf13.com`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, -} - -func Execute() { - if err := rootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(1) - } -} -``` - -You will additionally define flags and handle configuration in your init() function. 
- -For example cmd/root.go: - -```go -import ( - "fmt" - "os" - - homedir "github.com/mitchellh/go-homedir" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -func init() { - cobra.OnInitialize(initConfig) - rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") - rootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/") - rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution") - rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)") - rootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) - viper.BindPFlag("projectbase", rootCmd.PersistentFlags().Lookup("projectbase")) - viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) - viper.SetDefault("author", "NAME HERE ") - viper.SetDefault("license", "apache") -} - -func initConfig() { - // Don't forget to read config either from cfgFile or from home directory! - if cfgFile != "" { - // Use config file from the flag. - viper.SetConfigFile(cfgFile) - } else { - // Find home directory. - home, err := homedir.Dir() - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - // Search config in home directory with name ".cobra" (without extension). - viper.AddConfigPath(home) - viper.SetConfigName(".cobra") - } - - if err := viper.ReadInConfig(); err != nil { - fmt.Println("Can't read config:", err) - os.Exit(1) - } -} -``` - -### Create your main.go - -With the root command you need to have your main function execute it. -Execute should be run on the root for clarity, though it can be called on any command. - -In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. - -```go -package main - -import ( - "fmt" - "os" - - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -### Create additional commands - -Additional commands can be defined and typically are each given their own file -inside of the cmd/ directory. - -If you wanted to create a version command you would create cmd/version.go and -populate it with the following: - -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(versionCmd) -} - -var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, -} -``` - -## Working with Flags - -Flags provide modifiers to control how the action command operates. - -### Assign flags to a command - -Since the flags are defined and used in different locations, we need to -define a variable outside with the correct scope to assign the flag to -work with. - -```go -var Verbose bool -var Source string -``` - -There are two different approaches to assign a flag. - -### Persistent Flags - -A flag can be 'persistent' meaning that this flag will be available to the -command it's assigned to as well as every command under that command. For -global flags, assign a flag as a persistent flag on the root. 
- -```go -rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") -``` - -### Local Flags - -A flag can also be assigned locally which will only apply to that specific command. - -```go -rootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") -``` - -### Local Flag on Parent Commands - -By default Cobra only parses local flags on the target command, any local flags on -parent commands are ignored. By enabling `Command.TraverseChildren` Cobra will -parse local flags on each command before executing the target command. - -```go -command := cobra.Command{ - Use: "print [OPTIONS] [COMMANDS]", - TraverseChildren: true, -} -``` - -### Bind Flags with Config - -You can also bind your flags with [viper](https://github.com/spf13/viper): -```go -var author string - -func init() { - rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) -} -``` - -In this example the persistent flag `author` is bound with `viper`. -**Note**, that the variable `author` will not be set to the value from config, -when the `--author` flag is not provided by user. - -More in [viper documentation](https://github.com/spf13/viper#working-with-flags). - -### Required flags - -Flags are optional by default. If instead you wish your command to report an error -when a flag has not been set, mark it as required: -```go -rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkFlagRequired("region") -``` - -## Positional and Custom Arguments - -Validation of positional arguments can be specified using the `Args` field -of `Command`. - -The following validators are built in: - -- `NoArgs` - the command will report an error if there are any positional args. -- `ArbitraryArgs` - the command will accept any args. -- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`. -- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args. -- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args. -- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args. -- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args. - -An example of setting the custom validator: - -```go -var cmd = &cobra.Command{ - Short: "hello", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) < 1 { - return errors.New("requires at least one arg") - } - if myapp.IsValidColor(args[0]) { - return nil - } - return fmt.Errorf("invalid color specified: %s", args[0]) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hello, World!") - }, -} -``` - -## Example - -In the example below, we have defined three commands. Two are at the top level -and one (cmdTimes) is a child of one of the top commands. In this case the root -is not executable meaning that a subcommand is required. This is accomplished -by not providing a 'Run' for the 'rootCmd'. - -We have only defined one flag for a single command. 
- -More documentation about flags is available at https://github.com/spf13/pflag - -```go -package main - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -func main() { - var echoTimes int - - var cmdPrint = &cobra.Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `print is for printing anything back to the screen. -For many years people have printed back to the screen.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdEcho = &cobra.Command{ - Use: "echo [string to echo]", - Short: "Echo anything to the screen", - Long: `echo is for echoing anything back. -Echo works a lot like print, except it has a child command.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdTimes = &cobra.Command{ - Use: "times [# times] [string to echo]", - Short: "Echo anything to the screen more times", - Long: `echo things multiple times back to the user by providing -a count and a string.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - for i := 0; i < echoTimes; i++ { - fmt.Println("Echo: " + strings.Join(args, " ")) - } - }, - } - - cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - - var rootCmd = &cobra.Command{Use: "app"} - rootCmd.AddCommand(cmdPrint, cmdEcho) - cmdEcho.AddCommand(cmdTimes) - rootCmd.Execute() -} -``` - -For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/). - -## Help Command - -Cobra automatically adds a help command to your application when you have subcommands. -This will be called when a user runs 'app help'. Additionally, help will also -support all other commands as input. Say, for instance, you have a command called -'create' without any additional configuration; Cobra will work when 'app help -create' is called. Every command will automatically have the '--help' flag added. - -### Example - -The following output is automatically generated by Cobra. Nothing beyond the -command and flag definitions are needed. - - $ cobra help - - Cobra is a CLI library for Go that empowers applications. - This application is a tool to generate the needed files - to quickly create a Cobra application. - - Usage: - cobra [command] - - Available Commands: - add Add a command to a Cobra Application - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra - -l, --license string name of license for the project - --viper use Viper for configuration (default true) - - Use "cobra [command] --help" for more information about a command. - - -Help is just a command like any other. There is no special logic or behavior -around it. In fact, you can provide your own if you want. - -### Defining your own help - -You can provide your own Help command or your own template for the default command to use -with following functions: - -```go -cmd.SetHelpCommand(cmd *Command) -cmd.SetHelpFunc(f func(*Command, []string)) -cmd.SetHelpTemplate(s string) -``` - -The latter two will also apply to any children commands. - -## Usage Message - -When the user provides an invalid flag or invalid command, Cobra responds by -showing the user the 'usage'. 
- -### Example -You may recognize this from the help above. That's because the default help -embeds the usage as part of its output. - - $ cobra --invalid - Error: unknown flag: --invalid - Usage: - cobra [command] - - Available Commands: - add Add a command to a Cobra Application - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra - -l, --license string name of license for the project - --viper use Viper for configuration (default true) - - Use "cobra [command] --help" for more information about a command. - -### Defining your own usage -You can provide your own usage function or template for Cobra to use. -Like help, the function and template are overridable through public methods: - -```go -cmd.SetUsageFunc(f func(*Command) error) -cmd.SetUsageTemplate(s string) -``` - -## Version Flag - -Cobra adds a top-level '--version' flag if the Version field is set on the root command. -Running an application with the '--version' flag will print the version to stdout using -the version template. The template can be customized using the -`cmd.SetVersionTemplate(s string)` function. - -## PreRun and PostRun Hooks - -It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: - -- `PersistentPreRun` -- `PreRun` -- `Run` -- `PostRun` -- `PersistentPostRun` - -An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: - -```go -package main - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func main() { - - var rootCmd = &cobra.Command{ - Use: "root [sub]", - Short: "My root command", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) - }, - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) - }, - } - - var subCmd = &cobra.Command{ - Use: "sub [no options!]", - Short: "My subcommand", - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) - }, - } - - rootCmd.AddCommand(subCmd) - - rootCmd.SetArgs([]string{""}) - rootCmd.Execute() - fmt.Println() - rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) - rootCmd.Execute() -} -``` - -Output: -``` -Inside rootCmd PersistentPreRun with args: [] -Inside rootCmd PreRun with args: [] -Inside rootCmd Run with args: [] -Inside rootCmd PostRun with args: [] -Inside rootCmd PersistentPostRun with args: [] - -Inside rootCmd PersistentPreRun with args: [arg1 arg2] -Inside subCmd PreRun with args: [arg1 arg2] -Inside subCmd Run with args: [arg1 arg2] -Inside subCmd PostRun with args: [arg1 arg2] -Inside subCmd PersistentPostRun with args: [arg1 arg2] -``` - -## Suggestions when "unknown command" happens - -Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: - -``` -$ hugo srever -Error: unknown command "srever" for "hugo" - -Did you mean this? - server - -Run 'hugo --help' for usage. -``` - -Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. - -If you need to disable suggestions or tweak the string distance in your command, use: - -```go -command.DisableSuggestions = true -``` - -or - -```go -command.SuggestionsMinimumDistance = 1 -``` - -You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: - -``` -$ kubectl remove -Error: unknown command "remove" for "kubectl" - -Did you mean this? - delete - -Run 'kubectl help' for usage. -``` - -## Generating documentation for your command - -Cobra can generate documentation based on subcommands, flags, etc. 
in the following formats: - -- [Markdown](doc/md_docs.md) -- [ReStructured Text](doc/rest_docs.md) -- [Man Page](doc/man_docs.md) - -## Generating bash completions - -Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md). - -# Contributing - -1. Fork it -2. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`) -3. Create your feature branch (`git checkout -b my-new-feature`) -4. Make changes and add them (`git add .`) -5. Commit your changes (`git commit -m 'Add some feature'`) -6. Push to the branch (`git push origin my-new-feature`) -7. Create new pull request - -# License - -Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) diff --git a/vendor/github.com/spf13/cobra/args_test.go b/vendor/github.com/spf13/cobra/args_test.go deleted file mode 100644 index d797b6f5..00000000 --- a/vendor/github.com/spf13/cobra/args_test.go +++ /dev/null @@ -1,241 +0,0 @@ -package cobra - -import ( - "strings" - "testing" -) - -func TestNoArgs(t *testing.T) { - c := &Command{Use: "c", Args: NoArgs, Run: emptyRun} - - output, err := executeCommand(c) - if output != "" { - t.Errorf("Unexpected string: %v", output) - } - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } -} - -func TestNoArgsWithArgs(t *testing.T) { - c := &Command{Use: "c", Args: NoArgs, Run: emptyRun} - - _, err := executeCommand(c, "illegal") - if err == nil { - t.Fatal("Expected an error") - } - - got := err.Error() - expected := `unknown command "illegal" for "c"` - if got != expected { - t.Errorf("Expected: %q, got: %q", expected, got) - } -} - -func TestOnlyValidArgs(t *testing.T) { - c := &Command{ - Use: "c", - Args: OnlyValidArgs, - ValidArgs: []string{"one", "two"}, - Run: emptyRun, - } - - output, err := executeCommand(c, "one", "two") - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } -} - -func TestOnlyValidArgsWithInvalidArgs(t *testing.T) { - c := &Command{ - Use: "c", - Args: OnlyValidArgs, - ValidArgs: []string{"one", "two"}, - Run: emptyRun, - } - - _, err := executeCommand(c, "three") - if err == nil { - t.Fatal("Expected an error") - } - - got := err.Error() - expected := `invalid argument "three" for "c"` - if got != expected { - t.Errorf("Expected: %q, got: %q", expected, got) - } -} - -func TestArbitraryArgs(t *testing.T) { - c := &Command{Use: "c", Args: ArbitraryArgs, Run: emptyRun} - output, err := executeCommand(c, "a", "b") - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } -} - -func TestMinimumNArgs(t *testing.T) { - c := &Command{Use: "c", Args: MinimumNArgs(2), Run: emptyRun} - output, err := executeCommand(c, "a", "b", "c") - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } -} - -func TestMinimumNArgsWithLessArgs(t *testing.T) { - c := &Command{Use: "c", Args: MinimumNArgs(2), Run: emptyRun} - _, err := executeCommand(c, "a") - - if err == nil { - t.Fatal("Expected an error") - } - - got := err.Error() - expected := "requires at least 2 arg(s), only received 1" - if got != expected { - t.Fatalf("Expected %q, got %q", expected, got) - } -} - -func TestMaximumNArgs(t *testing.T) { - c := &Command{Use: "c", 
Args: MaximumNArgs(3), Run: emptyRun} - output, err := executeCommand(c, "a", "b") - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } -} - -func TestMaximumNArgsWithMoreArgs(t *testing.T) { - c := &Command{Use: "c", Args: MaximumNArgs(2), Run: emptyRun} - _, err := executeCommand(c, "a", "b", "c") - - if err == nil { - t.Fatal("Expected an error") - } - - got := err.Error() - expected := "accepts at most 2 arg(s), received 3" - if got != expected { - t.Fatalf("Expected %q, got %q", expected, got) - } -} - -func TestExactArgs(t *testing.T) { - c := &Command{Use: "c", Args: ExactArgs(3), Run: emptyRun} - output, err := executeCommand(c, "a", "b", "c") - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } -} - -func TestExactArgsWithInvalidCount(t *testing.T) { - c := &Command{Use: "c", Args: ExactArgs(2), Run: emptyRun} - _, err := executeCommand(c, "a", "b", "c") - - if err == nil { - t.Fatal("Expected an error") - } - - got := err.Error() - expected := "accepts 2 arg(s), received 3" - if got != expected { - t.Fatalf("Expected %q, got %q", expected, got) - } -} - -func TestRangeArgs(t *testing.T) { - c := &Command{Use: "c", Args: RangeArgs(2, 4), Run: emptyRun} - output, err := executeCommand(c, "a", "b", "c") - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } -} - -func TestRangeArgsWithInvalidCount(t *testing.T) { - c := &Command{Use: "c", Args: RangeArgs(2, 4), Run: emptyRun} - _, err := executeCommand(c, "a") - - if err == nil { - t.Fatal("Expected an error") - } - - got := err.Error() - expected := "accepts between 2 and 4 arg(s), received 1" - if got != expected { - t.Fatalf("Expected %q, got %q", expected, got) - } -} - -func TestRootTakesNoArgs(t *testing.T) { - rootCmd := &Command{Use: "root", Run: emptyRun} - childCmd := &Command{Use: "child", Run: emptyRun} - rootCmd.AddCommand(childCmd) - - _, err := executeCommand(rootCmd, "illegal", "args") - if err == nil { - t.Fatal("Expected an error") - } - - got := err.Error() - expected := `unknown command "illegal" for "root"` - if !strings.Contains(got, expected) { - t.Errorf("expected %q, got %q", expected, got) - } -} - -func TestRootTakesArgs(t *testing.T) { - rootCmd := &Command{Use: "root", Args: ArbitraryArgs, Run: emptyRun} - childCmd := &Command{Use: "child", Run: emptyRun} - rootCmd.AddCommand(childCmd) - - _, err := executeCommand(rootCmd, "legal", "args") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } -} - -func TestChildTakesNoArgs(t *testing.T) { - rootCmd := &Command{Use: "root", Run: emptyRun} - childCmd := &Command{Use: "child", Args: NoArgs, Run: emptyRun} - rootCmd.AddCommand(childCmd) - - _, err := executeCommand(rootCmd, "child", "illegal", "args") - if err == nil { - t.Fatal("Expected an error") - } - - got := err.Error() - expected := `unknown command "illegal" for "root child"` - if !strings.Contains(got, expected) { - t.Errorf("expected %q, got %q", expected, got) - } -} - -func TestChildTakesArgs(t *testing.T) { - rootCmd := &Command{Use: "root", Run: emptyRun} - childCmd := &Command{Use: "child", Args: ArbitraryArgs, Run: emptyRun} - rootCmd.AddCommand(childCmd) - - _, err := executeCommand(rootCmd, "child", "legal", "args") - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } -} diff --git a/vendor/github.com/spf13/cobra/bash_completions.md 
b/vendor/github.com/spf13/cobra/bash_completions.md deleted file mode 100644 index e79d4769..00000000 --- a/vendor/github.com/spf13/cobra/bash_completions.md +++ /dev/null @@ -1,221 +0,0 @@ -# Generating Bash Completions For Your Own cobra.Command - -Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows: - -```go -package main - -import ( - "io/ioutil" - "os" - - "k8s.io/kubernetes/pkg/kubectl/cmd" - "k8s.io/kubernetes/pkg/kubectl/cmd/util" -) - -func main() { - kubectl := cmd.NewKubectlCommand(util.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) - kubectl.GenBashCompletionFile("out.sh") -} -``` - -`out.sh` will get you completions of subcommands and flags. Copy it to `/etc/bash_completion.d/` as described [here](https://debian-administration.org/article/316/An_introduction_to_bash_completion_part_1) and reset your terminal to use autocompletion. If you make additional annotations to your code, you can get even more intelligent and flexible behavior. - -## Creating your own custom functions - -Some more actual code that works in kubernetes: - -```bash -const ( - bash_completion_func = `__kubectl_parse_get() -{ - local kubectl_output out - if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then - out=($(echo "${kubectl_output}" | awk '{print $1}')) - COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) - fi -} - -__kubectl_get_resource() -{ - if [[ ${#nouns[@]} -eq 0 ]]; then - return 1 - fi - __kubectl_parse_get ${nouns[${#nouns[@]} -1]} - if [[ $? -eq 0 ]]; then - return 0 - fi -} - -__custom_func() { - case ${last_command} in - kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop) - __kubectl_get_resource - return - ;; - *) - ;; - esac -} -`) -``` - -And then I set that in my command definition: - -```go -cmds := &cobra.Command{ - Use: "kubectl", - Short: "kubectl controls the Kubernetes cluster manager", - Long: `kubectl controls the Kubernetes cluster manager. - -Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, - Run: runHelp, - BashCompletionFunction: bash_completion_func, -} -``` - -The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__custom_func()` to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods! - -## Have the completions code complete your 'nouns' - -In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like: - -```go -validArgs []string = { "pod", "node", "service", "replicationcontroller" } - -cmd := &cobra.Command{ - Use: "get [(-o|--output=)json|yaml|template|...] 
(RESOURCE [NAME] | RESOURCE/NAME ...)", - Short: "Display one or many resources", - Long: get_long, - Example: get_example, - Run: func(cmd *cobra.Command, args []string) { - err := RunGet(f, out, cmd, args) - util.CheckErr(err) - }, - ValidArgs: validArgs, -} -``` - -Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like - -```bash -# kubectl get [tab][tab] -node pod replicationcontroller service -``` - -## Plural form and shortcuts for nouns - -If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`: - -```go -argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" } - -cmd := &cobra.Command{ - ... - ValidArgs: validArgs, - ArgAliases: argAliases -} -``` - -The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by -the completion algorithm if entered manually, e.g. in: - -```bash -# kubectl get rc [tab][tab] -backend frontend database -``` - -Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns -in this example again instead of the replication controllers. - -## Mark flags as required - -Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy. - -```go -cmd.MarkFlagRequired("pod") -cmd.MarkFlagRequired("container") -``` - -and you'll get something like - -```bash -# kubectl exec [tab][tab][tab] --c --container= -p --pod= -``` - -# Specify valid filename extensions for flags that take a filename - -In this example we use --filename= and expect to get a json or yaml file as the argument. To make this easier we annotate the --filename flag with valid filename extensions. - -```go - annotations := []string{"json", "yaml", "yml"} - annotation := make(map[string][]string) - annotation[cobra.BashCompFilenameExt] = annotations - - flag := &pflag.Flag{ - Name: "filename", - Shorthand: "f", - Usage: usage, - Value: value, - DefValue: value.String(), - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -Now when you run a command with this filename flag you'll get something like - -```bash -# kubectl create -f -test/ example/ rpmbuild/ -hello.yml test.json -``` - -So while there are many other files in the CWD it only shows me subdirs and those with valid extensions. - -# Specify custom flag completion - -Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specify -a custom flag completion function with cobra.BashCompCustom: - -```go - annotation := make(map[string][]string) - annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"} - - flag := &pflag.Flag{ - Name: "namespace", - Usage: usage, - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -In addition add the `__handle_namespace_flag` implementation in the `BashCompletionFunction` -value, e.g.: - -```bash -__kubectl_get_namespaces() -{ - local template - template="{{ range .items }}{{ .metadata.name }} {{ end }}" - local kubectl_out - if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then - COMPREPLY=( $( compgen -W "${kubectl_out}[*]" -- "$cur" ) ) - fi -} -``` -# Using bash aliases for commands - -You can also configure the `bash aliases` for the commands and they will also support completions. 
- -```bash -alias aliasname=origcommand -complete -o default -F __start_origcommand aliasname - -# and now when you run `aliasname` completion will make -# suggestions as it did for `origcommand`. - -$) aliasname -completion firstcommand secondcommand -``` diff --git a/vendor/github.com/spf13/cobra/bash_completions_test.go b/vendor/github.com/spf13/cobra/bash_completions_test.go deleted file mode 100644 index 02a4f15b..00000000 --- a/vendor/github.com/spf13/cobra/bash_completions_test.go +++ /dev/null @@ -1,217 +0,0 @@ -package cobra - -import ( - "bytes" - "fmt" - "os" - "os/exec" - "regexp" - "strings" - "testing" -) - -func checkOmit(t *testing.T, found, unexpected string) { - if strings.Contains(found, unexpected) { - t.Errorf("Got: %q\nBut should not have!\n", unexpected) - } -} - -func check(t *testing.T, found, expected string) { - if !strings.Contains(found, expected) { - t.Errorf("Expecting to contain: \n %q\nGot:\n %q\n", expected, found) - } -} - -func checkRegex(t *testing.T, found, pattern string) { - matched, err := regexp.MatchString(pattern, found) - if err != nil { - t.Errorf("Error thrown performing MatchString: \n %s\n", err) - } - if !matched { - t.Errorf("Expecting to match: \n %q\nGot:\n %q\n", pattern, found) - } -} - -func runShellCheck(s string) error { - excluded := []string{ - "SC2034", // PREFIX appears unused. Verify it or export it. - } - cmd := exec.Command("shellcheck", "-s", "bash", "-", "-e", strings.Join(excluded, ",")) - cmd.Stderr = os.Stderr - cmd.Stdout = os.Stdout - - stdin, err := cmd.StdinPipe() - if err != nil { - return err - } - go func() { - stdin.Write([]byte(s)) - stdin.Close() - }() - - return cmd.Run() -} - -// World worst custom function, just keep telling you to enter hello! -const bashCompletionFunc = `__custom_func() { - COMPREPLY=( "hello" ) -} -` - -func TestBashCompletions(t *testing.T) { - rootCmd := &Command{ - Use: "root", - ArgAliases: []string{"pods", "nodes", "services", "replicationcontrollers", "po", "no", "svc", "rc"}, - ValidArgs: []string{"pod", "node", "service", "replicationcontroller"}, - BashCompletionFunction: bashCompletionFunc, - Run: emptyRun, - } - rootCmd.Flags().IntP("introot", "i", -1, "help message for flag introot") - rootCmd.MarkFlagRequired("introot") - - // Filename. - rootCmd.Flags().String("filename", "", "Enter a filename") - rootCmd.MarkFlagFilename("filename", "json", "yaml", "yml") - - // Persistent filename. - rootCmd.PersistentFlags().String("persistent-filename", "", "Enter a filename") - rootCmd.MarkPersistentFlagFilename("persistent-filename") - rootCmd.MarkPersistentFlagRequired("persistent-filename") - - // Filename extensions. - rootCmd.Flags().String("filename-ext", "", "Enter a filename (extension limited)") - rootCmd.MarkFlagFilename("filename-ext") - rootCmd.Flags().String("custom", "", "Enter a filename (extension limited)") - rootCmd.MarkFlagCustom("custom", "__complete_custom") - - // Subdirectories in a given directory. 
- rootCmd.Flags().String("theme", "", "theme to use (located in /themes/THEMENAME/)") - rootCmd.Flags().SetAnnotation("theme", BashCompSubdirsInDir, []string{"themes"}) - - echoCmd := &Command{ - Use: "echo [string to echo]", - Aliases: []string{"say"}, - Short: "Echo anything to the screen", - Long: "an utterly useless command for testing.", - Example: "Just run cobra-test echo", - Run: emptyRun, - } - - echoCmd.Flags().String("filename", "", "Enter a filename") - echoCmd.MarkFlagFilename("filename", "json", "yaml", "yml") - echoCmd.Flags().String("config", "", "config to use (located in /config/PROFILE/)") - echoCmd.Flags().SetAnnotation("config", BashCompSubdirsInDir, []string{"config"}) - - printCmd := &Command{ - Use: "print [string to print]", - Args: MinimumNArgs(1), - Short: "Print anything to the screen", - Long: "an absolutely utterly useless command for testing.", - Run: emptyRun, - } - - deprecatedCmd := &Command{ - Use: "deprecated [can't do anything here]", - Args: NoArgs, - Short: "A command which is deprecated", - Long: "an absolutely utterly useless command for testing deprecation!.", - Deprecated: "Please use echo instead", - Run: emptyRun, - } - - colonCmd := &Command{ - Use: "cmd:colon", - Run: emptyRun, - } - - timesCmd := &Command{ - Use: "times [# times] [string to echo]", - SuggestFor: []string{"counts"}, - Args: OnlyValidArgs, - ValidArgs: []string{"one", "two", "three", "four"}, - Short: "Echo anything to the screen more times", - Long: "a slightly useless command for testing.", - Run: emptyRun, - } - - echoCmd.AddCommand(timesCmd) - rootCmd.AddCommand(echoCmd, printCmd, deprecatedCmd, colonCmd) - - buf := new(bytes.Buffer) - rootCmd.GenBashCompletion(buf) - output := buf.String() - - check(t, output, "_root") - check(t, output, "_root_echo") - check(t, output, "_root_echo_times") - check(t, output, "_root_print") - check(t, output, "_root_cmd__colon") - - // check for required flags - check(t, output, `must_have_one_flag+=("--introot=")`) - check(t, output, `must_have_one_flag+=("--persistent-filename=")`) - // check for custom completion function - check(t, output, `COMPREPLY=( "hello" )`) - // check for required nouns - check(t, output, `must_have_one_noun+=("pod")`) - // check for noun aliases - check(t, output, `noun_aliases+=("pods")`) - check(t, output, `noun_aliases+=("rc")`) - checkOmit(t, output, `must_have_one_noun+=("pods")`) - // check for filename extension flags - check(t, output, `flags_completion+=("_filedir")`) - // check for filename extension flags - check(t, output, `must_have_one_noun+=("three")`) - // check for filename extension flags - check(t, output, fmt.Sprintf(`flags_completion+=("__%s_handle_filename_extension_flag json|yaml|yml")`, rootCmd.Name())) - // check for filename extension flags in a subcommand - checkRegex(t, output, fmt.Sprintf(`_root_echo\(\)\n{[^}]*flags_completion\+=\("__%s_handle_filename_extension_flag json\|yaml\|yml"\)`, rootCmd.Name())) - // check for custom flags - check(t, output, `flags_completion+=("__complete_custom")`) - // check for subdirs_in_dir flags - check(t, output, fmt.Sprintf(`flags_completion+=("__%s_handle_subdirs_in_dir_flag themes")`, rootCmd.Name())) - // check for subdirs_in_dir flags in a subcommand - checkRegex(t, output, fmt.Sprintf(`_root_echo\(\)\n{[^}]*flags_completion\+=\("__%s_handle_subdirs_in_dir_flag config"\)`, rootCmd.Name())) - - checkOmit(t, output, deprecatedCmd.Name()) - - // If available, run shellcheck against the script. 
- if err := exec.Command("which", "shellcheck").Run(); err != nil { - return - } - if err := runShellCheck(output); err != nil { - t.Fatalf("shellcheck failed: %v", err) - } -} - -func TestBashCompletionHiddenFlag(t *testing.T) { - c := &Command{Use: "c", Run: emptyRun} - - const flagName = "hiddenFlag" - c.Flags().Bool(flagName, false, "") - c.Flags().MarkHidden(flagName) - - buf := new(bytes.Buffer) - c.GenBashCompletion(buf) - output := buf.String() - - if strings.Contains(output, flagName) { - t.Errorf("Expected completion to not include %q flag: Got %v", flagName, output) - } -} - -func TestBashCompletionDeprecatedFlag(t *testing.T) { - c := &Command{Use: "c", Run: emptyRun} - - const flagName = "deprecated-flag" - c.Flags().Bool(flagName, false, "") - c.Flags().MarkDeprecated(flagName, "use --not-deprecated instead") - - buf := new(bytes.Buffer) - c.GenBashCompletion(buf) - output := buf.String() - - if strings.Contains(output, flagName) { - t.Errorf("expected completion to not include %q flag: Got %v", flagName, output) - } -} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden b/vendor/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/spf13/cobra/cobra_test.go b/vendor/github.com/spf13/cobra/cobra_test.go deleted file mode 100644 index 0d1755bd..00000000 --- a/vendor/github.com/spf13/cobra/cobra_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package cobra - -import ( - "testing" - "text/template" -) - -func TestAddTemplateFunctions(t *testing.T) { - AddTemplateFunc("t", func() bool { return true }) - AddTemplateFuncs(template.FuncMap{ - "f": func() bool { return false }, - "h": func() string { return "Hello," }, - "w": func() string { return "world." }}) - - c := &Command{} - c.SetUsageTemplate(`{{if t}}{{h}}{{end}}{{if f}}{{h}}{{end}} {{w}}`) - - const expected = "Hello, world." 
- if got := c.UsageString(); got != expected { - t.Errorf("Expected UsageString: %v\nGot: %v", expected, got) - } -} diff --git a/vendor/github.com/spf13/cobra/command_test.go b/vendor/github.com/spf13/cobra/command_test.go deleted file mode 100644 index ccee031d..00000000 --- a/vendor/github.com/spf13/cobra/command_test.go +++ /dev/null @@ -1,1733 +0,0 @@ -package cobra - -import ( - "bytes" - "fmt" - "os" - "reflect" - "strings" - "testing" - - "github.com/spf13/pflag" -) - -func emptyRun(*Command, []string) {} - -func executeCommand(root *Command, args ...string) (output string, err error) { - _, output, err = executeCommandC(root, args...) - return output, err -} - -func executeCommandC(root *Command, args ...string) (c *Command, output string, err error) { - buf := new(bytes.Buffer) - root.SetOutput(buf) - root.SetArgs(args) - - c, err = root.ExecuteC() - - return c, buf.String(), err -} - -func resetCommandLineFlagSet() { - pflag.CommandLine = pflag.NewFlagSet(os.Args[0], pflag.ExitOnError) -} - -func checkStringContains(t *testing.T, got, expected string) { - if !strings.Contains(got, expected) { - t.Errorf("Expected to contain: \n %v\nGot:\n %v\n", expected, got) - } -} - -func checkStringOmits(t *testing.T, got, expected string) { - if strings.Contains(got, expected) { - t.Errorf("Expected to not contain: \n %v\nGot: %v", expected, got) - } -} - -func TestSingleCommand(t *testing.T) { - var rootCmdArgs []string - rootCmd := &Command{ - Use: "root", - Args: ExactArgs(2), - Run: func(_ *Command, args []string) { rootCmdArgs = args }, - } - aCmd := &Command{Use: "a", Args: NoArgs, Run: emptyRun} - bCmd := &Command{Use: "b", Args: NoArgs, Run: emptyRun} - rootCmd.AddCommand(aCmd, bCmd) - - output, err := executeCommand(rootCmd, "one", "two") - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - got := strings.Join(rootCmdArgs, " ") - expected := "one two" - if got != expected { - t.Errorf("rootCmdArgs expected: %q, got: %q", expected, got) - } -} - -func TestChildCommand(t *testing.T) { - var child1CmdArgs []string - rootCmd := &Command{Use: "root", Args: NoArgs, Run: emptyRun} - child1Cmd := &Command{ - Use: "child1", - Args: ExactArgs(2), - Run: func(_ *Command, args []string) { child1CmdArgs = args }, - } - child2Cmd := &Command{Use: "child2", Args: NoArgs, Run: emptyRun} - rootCmd.AddCommand(child1Cmd, child2Cmd) - - output, err := executeCommand(rootCmd, "child1", "one", "two") - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - got := strings.Join(child1CmdArgs, " ") - expected := "one two" - if got != expected { - t.Errorf("child1CmdArgs expected: %q, got: %q", expected, got) - } -} - -func TestCallCommandWithoutSubcommands(t *testing.T) { - rootCmd := &Command{Use: "root", Args: NoArgs, Run: emptyRun} - _, err := executeCommand(rootCmd) - if err != nil { - t.Errorf("Calling command without subcommands should not have error: %v", err) - } -} - -func TestRootExecuteUnknownCommand(t *testing.T) { - rootCmd := &Command{Use: "root", Run: emptyRun} - rootCmd.AddCommand(&Command{Use: "child", Run: emptyRun}) - - output, _ := executeCommand(rootCmd, "unknown") - - expected := "Error: unknown command \"unknown\" for \"root\"\nRun 'root --help' for usage.\n" - - if output != expected { - t.Errorf("Expected:\n %q\nGot:\n %q\n", expected, output) - } -} - -func TestSubcommandExecuteC(t *testing.T) { - rootCmd := &Command{Use: 
"root", Run: emptyRun} - childCmd := &Command{Use: "child", Run: emptyRun} - rootCmd.AddCommand(childCmd) - - c, output, err := executeCommandC(rootCmd, "child") - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if c.Name() != "child" { - t.Errorf(`invalid command returned from ExecuteC: expected "child"', got %q`, c.Name()) - } -} - -func TestRootUnknownCommandSilenced(t *testing.T) { - rootCmd := &Command{Use: "root", Run: emptyRun} - rootCmd.SilenceErrors = true - rootCmd.SilenceUsage = true - rootCmd.AddCommand(&Command{Use: "child", Run: emptyRun}) - - output, _ := executeCommand(rootCmd, "unknown") - if output != "" { - t.Errorf("Expected blank output, because of silenced usage.\nGot:\n %q\n", output) - } -} - -func TestCommandAlias(t *testing.T) { - var timesCmdArgs []string - rootCmd := &Command{Use: "root", Args: NoArgs, Run: emptyRun} - echoCmd := &Command{ - Use: "echo", - Aliases: []string{"say", "tell"}, - Args: NoArgs, - Run: emptyRun, - } - timesCmd := &Command{ - Use: "times", - Args: ExactArgs(2), - Run: func(_ *Command, args []string) { timesCmdArgs = args }, - } - echoCmd.AddCommand(timesCmd) - rootCmd.AddCommand(echoCmd) - - output, err := executeCommand(rootCmd, "tell", "times", "one", "two") - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - got := strings.Join(timesCmdArgs, " ") - expected := "one two" - if got != expected { - t.Errorf("timesCmdArgs expected: %v, got: %v", expected, got) - } -} - -func TestEnablePrefixMatching(t *testing.T) { - EnablePrefixMatching = true - - var aCmdArgs []string - rootCmd := &Command{Use: "root", Args: NoArgs, Run: emptyRun} - aCmd := &Command{ - Use: "aCmd", - Args: ExactArgs(2), - Run: func(_ *Command, args []string) { aCmdArgs = args }, - } - bCmd := &Command{Use: "bCmd", Args: NoArgs, Run: emptyRun} - rootCmd.AddCommand(aCmd, bCmd) - - output, err := executeCommand(rootCmd, "a", "one", "two") - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - got := strings.Join(aCmdArgs, " ") - expected := "one two" - if got != expected { - t.Errorf("aCmdArgs expected: %q, got: %q", expected, got) - } - - EnablePrefixMatching = false -} - -func TestAliasPrefixMatching(t *testing.T) { - EnablePrefixMatching = true - - var timesCmdArgs []string - rootCmd := &Command{Use: "root", Args: NoArgs, Run: emptyRun} - echoCmd := &Command{ - Use: "echo", - Aliases: []string{"say", "tell"}, - Args: NoArgs, - Run: emptyRun, - } - timesCmd := &Command{ - Use: "times", - Args: ExactArgs(2), - Run: func(_ *Command, args []string) { timesCmdArgs = args }, - } - echoCmd.AddCommand(timesCmd) - rootCmd.AddCommand(echoCmd) - - output, err := executeCommand(rootCmd, "sa", "times", "one", "two") - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - got := strings.Join(timesCmdArgs, " ") - expected := "one two" - if got != expected { - t.Errorf("timesCmdArgs expected: %v, got: %v", expected, got) - } - - EnablePrefixMatching = false -} - -// TestChildSameName checks the correct behaviour of cobra in cases, -// when an application with name "foo" and with subcommand "foo" -// is executed with args "foo foo". 
-func TestChildSameName(t *testing.T) { - var fooCmdArgs []string - rootCmd := &Command{Use: "foo", Args: NoArgs, Run: emptyRun} - fooCmd := &Command{ - Use: "foo", - Args: ExactArgs(2), - Run: func(_ *Command, args []string) { fooCmdArgs = args }, - } - barCmd := &Command{Use: "bar", Args: NoArgs, Run: emptyRun} - rootCmd.AddCommand(fooCmd, barCmd) - - output, err := executeCommand(rootCmd, "foo", "one", "two") - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - got := strings.Join(fooCmdArgs, " ") - expected := "one two" - if got != expected { - t.Errorf("fooCmdArgs expected: %v, got: %v", expected, got) - } -} - -// TestGrandChildSameName checks the correct behaviour of cobra in cases, -// when user has a root command and a grand child -// with the same name. -func TestGrandChildSameName(t *testing.T) { - var fooCmdArgs []string - rootCmd := &Command{Use: "foo", Args: NoArgs, Run: emptyRun} - barCmd := &Command{Use: "bar", Args: NoArgs, Run: emptyRun} - fooCmd := &Command{ - Use: "foo", - Args: ExactArgs(2), - Run: func(_ *Command, args []string) { fooCmdArgs = args }, - } - barCmd.AddCommand(fooCmd) - rootCmd.AddCommand(barCmd) - - output, err := executeCommand(rootCmd, "bar", "foo", "one", "two") - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - got := strings.Join(fooCmdArgs, " ") - expected := "one two" - if got != expected { - t.Errorf("fooCmdArgs expected: %v, got: %v", expected, got) - } -} - -func TestFlagLong(t *testing.T) { - var cArgs []string - c := &Command{ - Use: "c", - Args: ArbitraryArgs, - Run: func(_ *Command, args []string) { cArgs = args }, - } - - var intFlagValue int - var stringFlagValue string - c.Flags().IntVar(&intFlagValue, "intf", -1, "") - c.Flags().StringVar(&stringFlagValue, "sf", "", "") - - output, err := executeCommand(c, "--intf=7", "--sf=abc", "one", "--", "two") - if output != "" { - t.Errorf("Unexpected output: %v", err) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if c.ArgsLenAtDash() != 1 { - t.Errorf("Expected ArgsLenAtDash: %v but got %v", 1, c.ArgsLenAtDash()) - } - if intFlagValue != 7 { - t.Errorf("Expected intFlagValue: %v, got %v", 7, intFlagValue) - } - if stringFlagValue != "abc" { - t.Errorf("Expected stringFlagValue: %q, got %q", "abc", stringFlagValue) - } - - got := strings.Join(cArgs, " ") - expected := "one two" - if got != expected { - t.Errorf("Expected arguments: %q, got %q", expected, got) - } -} - -func TestFlagShort(t *testing.T) { - var cArgs []string - c := &Command{ - Use: "c", - Args: ArbitraryArgs, - Run: func(_ *Command, args []string) { cArgs = args }, - } - - var intFlagValue int - var stringFlagValue string - c.Flags().IntVarP(&intFlagValue, "intf", "i", -1, "") - c.Flags().StringVarP(&stringFlagValue, "sf", "s", "", "") - - output, err := executeCommand(c, "-i", "7", "-sabc", "one", "two") - if output != "" { - t.Errorf("Unexpected output: %v", err) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if intFlagValue != 7 { - t.Errorf("Expected flag value: %v, got %v", 7, intFlagValue) - } - if stringFlagValue != "abc" { - t.Errorf("Expected stringFlagValue: %q, got %q", "abc", stringFlagValue) - } - - got := strings.Join(cArgs, " ") - expected := "one two" - if got != expected { - t.Errorf("Expected arguments: %q, got %q", expected, got) - } -} - -func TestChildFlag(t *testing.T) { - rootCmd := &Command{Use: 
"root", Run: emptyRun} - childCmd := &Command{Use: "child", Run: emptyRun} - rootCmd.AddCommand(childCmd) - - var intFlagValue int - childCmd.Flags().IntVarP(&intFlagValue, "intf", "i", -1, "") - - output, err := executeCommand(rootCmd, "child", "-i7") - if output != "" { - t.Errorf("Unexpected output: %v", err) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if intFlagValue != 7 { - t.Errorf("Expected flag value: %v, got %v", 7, intFlagValue) - } -} - -func TestChildFlagWithParentLocalFlag(t *testing.T) { - rootCmd := &Command{Use: "root", Run: emptyRun} - childCmd := &Command{Use: "child", Run: emptyRun} - rootCmd.AddCommand(childCmd) - - var intFlagValue int - rootCmd.Flags().StringP("sf", "s", "", "") - childCmd.Flags().IntVarP(&intFlagValue, "intf", "i", -1, "") - - _, err := executeCommand(rootCmd, "child", "-i7", "-sabc") - if err == nil { - t.Errorf("Invalid flag should generate error") - } - - checkStringContains(t, err.Error(), "unknown shorthand") - - if intFlagValue != 7 { - t.Errorf("Expected flag value: %v, got %v", 7, intFlagValue) - } -} - -func TestFlagInvalidInput(t *testing.T) { - rootCmd := &Command{Use: "root", Run: emptyRun} - rootCmd.Flags().IntP("intf", "i", -1, "") - - _, err := executeCommand(rootCmd, "-iabc") - if err == nil { - t.Errorf("Invalid flag value should generate error") - } - - checkStringContains(t, err.Error(), "invalid syntax") -} - -func TestFlagBeforeCommand(t *testing.T) { - rootCmd := &Command{Use: "root", Run: emptyRun} - childCmd := &Command{Use: "child", Run: emptyRun} - rootCmd.AddCommand(childCmd) - - var flagValue int - childCmd.Flags().IntVarP(&flagValue, "intf", "i", -1, "") - - // With short flag. - _, err := executeCommand(rootCmd, "-i7", "child") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - if flagValue != 7 { - t.Errorf("Expected flag value: %v, got %v", 7, flagValue) - } - - // With long flag. 
- _, err = executeCommand(rootCmd, "--intf=8", "child") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - if flagValue != 8 { - t.Errorf("Expected flag value: %v, got %v", 9, flagValue) - } -} - -func TestStripFlags(t *testing.T) { - tests := []struct { - input []string - output []string - }{ - { - []string{"foo", "bar"}, - []string{"foo", "bar"}, - }, - { - []string{"foo", "--str", "-s"}, - []string{"foo"}, - }, - { - []string{"-s", "foo", "--str", "bar"}, - []string{}, - }, - { - []string{"-i10", "echo"}, - []string{"echo"}, - }, - { - []string{"-i=10", "echo"}, - []string{"echo"}, - }, - { - []string{"--int=100", "echo"}, - []string{"echo"}, - }, - { - []string{"-ib", "echo", "-sfoo", "baz"}, - []string{"echo", "baz"}, - }, - { - []string{"-i=baz", "bar", "-i", "foo", "blah"}, - []string{"bar", "blah"}, - }, - { - []string{"--int=baz", "-sbar", "-i", "foo", "blah"}, - []string{"blah"}, - }, - { - []string{"--bool", "bar", "-i", "foo", "blah"}, - []string{"bar", "blah"}, - }, - { - []string{"-b", "bar", "-i", "foo", "blah"}, - []string{"bar", "blah"}, - }, - { - []string{"--persist", "bar"}, - []string{"bar"}, - }, - { - []string{"-p", "bar"}, - []string{"bar"}, - }, - } - - c := &Command{Use: "c", Run: emptyRun} - c.PersistentFlags().BoolP("persist", "p", false, "") - c.Flags().IntP("int", "i", -1, "") - c.Flags().StringP("str", "s", "", "") - c.Flags().BoolP("bool", "b", false, "") - - for i, test := range tests { - got := stripFlags(test.input, c) - if !reflect.DeepEqual(test.output, got) { - t.Errorf("(%v) Expected: %v, got: %v", i, test.output, got) - } - } -} - -func TestDisableFlagParsing(t *testing.T) { - var cArgs []string - c := &Command{ - Use: "c", - DisableFlagParsing: true, - Run: func(_ *Command, args []string) { - cArgs = args - }, - } - - args := []string{"cmd", "-v", "-race", "-file", "foo.go"} - output, err := executeCommand(c, args...) - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if !reflect.DeepEqual(args, cArgs) { - t.Errorf("Expected: %v, got: %v", args, cArgs) - } -} - -func TestPersistentFlagsOnSameCommand(t *testing.T) { - var rootCmdArgs []string - rootCmd := &Command{ - Use: "root", - Args: ArbitraryArgs, - Run: func(_ *Command, args []string) { rootCmdArgs = args }, - } - - var flagValue int - rootCmd.PersistentFlags().IntVarP(&flagValue, "intf", "i", -1, "") - - output, err := executeCommand(rootCmd, "-i7", "one", "two") - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - got := strings.Join(rootCmdArgs, " ") - expected := "one two" - if got != expected { - t.Errorf("rootCmdArgs expected: %q, got %q", expected, got) - } - if flagValue != 7 { - t.Errorf("flagValue expected: %v, got %v", 7, flagValue) - } -} - -// TestEmptyInputs checks, -// if flags correctly parsed with blank strings in args. -func TestEmptyInputs(t *testing.T) { - c := &Command{Use: "c", Run: emptyRun} - - var flagValue int - c.Flags().IntVarP(&flagValue, "intf", "i", -1, "") - - output, err := executeCommand(c, "", "-i7", "") - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if flagValue != 7 { - t.Errorf("flagValue expected: %v, got %v", 7, flagValue) - } -} - -func TestOverwrittenFlag(t *testing.T) { - // TODO: This test fails, but should work. 
- t.Skip() - - parent := &Command{Use: "parent", Run: emptyRun} - child := &Command{Use: "child", Run: emptyRun} - - parent.PersistentFlags().Bool("boolf", false, "") - parent.PersistentFlags().Int("intf", -1, "") - child.Flags().String("strf", "", "") - child.Flags().Int("intf", -1, "") - - parent.AddCommand(child) - - childInherited := child.InheritedFlags() - childLocal := child.LocalFlags() - - if childLocal.Lookup("strf") == nil { - t.Error(`LocalFlags expected to contain "strf", got "nil"`) - } - if childInherited.Lookup("boolf") == nil { - t.Error(`InheritedFlags expected to contain "boolf", got "nil"`) - } - - if childInherited.Lookup("intf") != nil { - t.Errorf(`InheritedFlags should not contain overwritten flag "intf"`) - } - if childLocal.Lookup("intf") == nil { - t.Error(`LocalFlags expected to contain "intf", got "nil"`) - } -} - -func TestPersistentFlagsOnChild(t *testing.T) { - var childCmdArgs []string - rootCmd := &Command{Use: "root", Run: emptyRun} - childCmd := &Command{ - Use: "child", - Args: ArbitraryArgs, - Run: func(_ *Command, args []string) { childCmdArgs = args }, - } - rootCmd.AddCommand(childCmd) - - var parentFlagValue int - var childFlagValue int - rootCmd.PersistentFlags().IntVarP(&parentFlagValue, "parentf", "p", -1, "") - childCmd.Flags().IntVarP(&childFlagValue, "childf", "c", -1, "") - - output, err := executeCommand(rootCmd, "child", "-c7", "-p8", "one", "two") - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - got := strings.Join(childCmdArgs, " ") - expected := "one two" - if got != expected { - t.Errorf("childCmdArgs expected: %q, got %q", expected, got) - } - if parentFlagValue != 8 { - t.Errorf("parentFlagValue expected: %v, got %v", 8, parentFlagValue) - } - if childFlagValue != 7 { - t.Errorf("childFlagValue expected: %v, got %v", 7, childFlagValue) - } -} - -func TestRequiredFlags(t *testing.T) { - c := &Command{Use: "c", Run: emptyRun} - c.Flags().String("foo1", "", "") - c.MarkFlagRequired("foo1") - c.Flags().String("foo2", "", "") - c.MarkFlagRequired("foo2") - c.Flags().String("bar", "", "") - - expected := fmt.Sprintf("required flag(s) %q, %q not set", "foo1", "foo2") - - _, err := executeCommand(c) - got := err.Error() - - if got != expected { - t.Errorf("Expected error: %q, got: %q", expected, got) - } -} - -func TestPersistentRequiredFlags(t *testing.T) { - parent := &Command{Use: "parent", Run: emptyRun} - parent.PersistentFlags().String("foo1", "", "") - parent.MarkPersistentFlagRequired("foo1") - parent.PersistentFlags().String("foo2", "", "") - parent.MarkPersistentFlagRequired("foo2") - parent.Flags().String("foo3", "", "") - - child := &Command{Use: "child", Run: emptyRun} - child.Flags().String("bar1", "", "") - child.MarkFlagRequired("bar1") - child.Flags().String("bar2", "", "") - child.MarkFlagRequired("bar2") - child.Flags().String("bar3", "", "") - - parent.AddCommand(child) - - expected := fmt.Sprintf("required flag(s) %q, %q, %q, %q not set", "bar1", "bar2", "foo1", "foo2") - - _, err := executeCommand(parent, "child") - if err.Error() != expected { - t.Errorf("Expected %q, got %q", expected, err.Error()) - } -} - -func TestInitHelpFlagMergesFlags(t *testing.T) { - usage := "custom flag" - rootCmd := &Command{Use: "root"} - rootCmd.PersistentFlags().Bool("help", false, "custom flag") - childCmd := &Command{Use: "child"} - rootCmd.AddCommand(childCmd) - - childCmd.InitDefaultHelpFlag() - got := childCmd.Flags().Lookup("help").Usage - if got 
!= usage { - t.Errorf("Expected the help flag from the root command with usage: %v\nGot the default with usage: %v", usage, got) - } -} - -func TestHelpCommandExecuted(t *testing.T) { - rootCmd := &Command{Use: "root", Long: "Long description", Run: emptyRun} - rootCmd.AddCommand(&Command{Use: "child", Run: emptyRun}) - - output, err := executeCommand(rootCmd, "help") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - checkStringContains(t, output, rootCmd.Long) -} - -func TestHelpCommandExecutedOnChild(t *testing.T) { - rootCmd := &Command{Use: "root", Run: emptyRun} - childCmd := &Command{Use: "child", Long: "Long description", Run: emptyRun} - rootCmd.AddCommand(childCmd) - - output, err := executeCommand(rootCmd, "help", "child") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - checkStringContains(t, output, childCmd.Long) -} - -func TestSetHelpCommand(t *testing.T) { - c := &Command{Use: "c", Run: emptyRun} - c.AddCommand(&Command{Use: "empty", Run: emptyRun}) - - expected := "WORKS" - c.SetHelpCommand(&Command{ - Use: "help [command]", - Short: "Help about any command", - Long: `Help provides help for any command in the application. - Simply type ` + c.Name() + ` help [path to command] for full details.`, - Run: func(c *Command, _ []string) { c.Print(expected) }, - }) - - got, err := executeCommand(c, "help") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if got != expected { - t.Errorf("Expected to contain %q, got %q", expected, got) - } -} - -func TestHelpFlagExecuted(t *testing.T) { - rootCmd := &Command{Use: "root", Long: "Long description", Run: emptyRun} - - output, err := executeCommand(rootCmd, "--help") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - checkStringContains(t, output, rootCmd.Long) -} - -func TestHelpFlagExecutedOnChild(t *testing.T) { - rootCmd := &Command{Use: "root", Run: emptyRun} - childCmd := &Command{Use: "child", Long: "Long description", Run: emptyRun} - rootCmd.AddCommand(childCmd) - - output, err := executeCommand(rootCmd, "child", "--help") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - checkStringContains(t, output, childCmd.Long) -} - -// TestHelpFlagInHelp checks, -// if '--help' flag is shown in help for child (executing `parent help child`), -// that has no other flags. -// Related to https://github.com/spf13/cobra/issues/302. 
-func TestHelpFlagInHelp(t *testing.T) { - parentCmd := &Command{Use: "parent", Run: func(*Command, []string) {}} - - childCmd := &Command{Use: "child", Run: func(*Command, []string) {}} - parentCmd.AddCommand(childCmd) - - output, err := executeCommand(parentCmd, "help", "child") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - checkStringContains(t, output, "[flags]") -} - -func TestFlagsInUsage(t *testing.T) { - rootCmd := &Command{Use: "root", Args: NoArgs, Run: func(*Command, []string) {}} - output, err := executeCommand(rootCmd, "--help") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - checkStringContains(t, output, "[flags]") -} - -func TestHelpExecutedOnNonRunnableChild(t *testing.T) { - rootCmd := &Command{Use: "root", Run: emptyRun} - childCmd := &Command{Use: "child", Long: "Long description"} - rootCmd.AddCommand(childCmd) - - output, err := executeCommand(rootCmd, "child") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - checkStringContains(t, output, childCmd.Long) -} - -func TestVersionFlagExecuted(t *testing.T) { - rootCmd := &Command{Use: "root", Version: "1.0.0", Run: emptyRun} - - output, err := executeCommand(rootCmd, "--version", "arg1") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - checkStringContains(t, output, "root version 1.0.0") -} - -func TestVersionTemplate(t *testing.T) { - rootCmd := &Command{Use: "root", Version: "1.0.0", Run: emptyRun} - rootCmd.SetVersionTemplate(`customized version: {{.Version}}`) - - output, err := executeCommand(rootCmd, "--version", "arg1") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - checkStringContains(t, output, "customized version: 1.0.0") -} - -func TestVersionFlagExecutedOnSubcommand(t *testing.T) { - rootCmd := &Command{Use: "root", Version: "1.0.0"} - rootCmd.AddCommand(&Command{Use: "sub", Run: emptyRun}) - - output, err := executeCommand(rootCmd, "--version", "sub") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - checkStringContains(t, output, "root version 1.0.0") -} - -func TestVersionFlagOnlyAddedToRoot(t *testing.T) { - rootCmd := &Command{Use: "root", Version: "1.0.0", Run: emptyRun} - rootCmd.AddCommand(&Command{Use: "sub", Run: emptyRun}) - - _, err := executeCommand(rootCmd, "sub", "--version") - if err == nil { - t.Errorf("Expected error") - } - - checkStringContains(t, err.Error(), "unknown flag: --version") -} - -func TestVersionFlagOnlyExistsIfVersionNonEmpty(t *testing.T) { - rootCmd := &Command{Use: "root", Run: emptyRun} - - _, err := executeCommand(rootCmd, "--version") - if err == nil { - t.Errorf("Expected error") - } - checkStringContains(t, err.Error(), "unknown flag: --version") -} - -func TestUsageIsNotPrintedTwice(t *testing.T) { - var cmd = &Command{Use: "root"} - var sub = &Command{Use: "sub"} - cmd.AddCommand(sub) - - output, _ := executeCommand(cmd, "") - if strings.Count(output, "Usage:") != 1 { - t.Error("Usage output is not printed exactly once") - } -} - -func TestVisitParents(t *testing.T) { - c := &Command{Use: "app"} - sub := &Command{Use: "sub"} - dsub := &Command{Use: "dsub"} - sub.AddCommand(dsub) - c.AddCommand(sub) - - total := 0 - add := func(x *Command) { - total++ - } - sub.VisitParents(add) - if total != 1 { - t.Errorf("Should have visited 1 parent but visited %d", total) - } - - total = 0 - dsub.VisitParents(add) - if total != 2 { - t.Errorf("Should have visited 2 parents but visited %d", total) - } - - total = 0 - c.VisitParents(add) - if total != 0 { - t.Errorf("Should 
have visited no parents but visited %d", total) - } -} - -func TestSuggestions(t *testing.T) { - rootCmd := &Command{Use: "root", Run: emptyRun} - timesCmd := &Command{ - Use: "times", - SuggestFor: []string{"counts"}, - Run: emptyRun, - } - rootCmd.AddCommand(timesCmd) - - templateWithSuggestions := "Error: unknown command \"%s\" for \"root\"\n\nDid you mean this?\n\t%s\n\nRun 'root --help' for usage.\n" - templateWithoutSuggestions := "Error: unknown command \"%s\" for \"root\"\nRun 'root --help' for usage.\n" - - tests := map[string]string{ - "time": "times", - "tiems": "times", - "tims": "times", - "timeS": "times", - "rimes": "times", - "ti": "times", - "t": "times", - "timely": "times", - "ri": "", - "timezone": "", - "foo": "", - "counts": "times", - } - - for typo, suggestion := range tests { - for _, suggestionsDisabled := range []bool{true, false} { - rootCmd.DisableSuggestions = suggestionsDisabled - - var expected string - output, _ := executeCommand(rootCmd, typo) - - if suggestion == "" || suggestionsDisabled { - expected = fmt.Sprintf(templateWithoutSuggestions, typo) - } else { - expected = fmt.Sprintf(templateWithSuggestions, typo, suggestion) - } - - if output != expected { - t.Errorf("Unexpected response.\nExpected:\n %q\nGot:\n %q\n", expected, output) - } - } - } -} - -func TestRemoveCommand(t *testing.T) { - rootCmd := &Command{Use: "root", Args: NoArgs, Run: emptyRun} - childCmd := &Command{Use: "child", Run: emptyRun} - rootCmd.AddCommand(childCmd) - rootCmd.RemoveCommand(childCmd) - - _, err := executeCommand(rootCmd, "child") - if err == nil { - t.Error("Expected error on calling removed command. Got nil.") - } -} - -func TestReplaceCommandWithRemove(t *testing.T) { - childUsed := 0 - rootCmd := &Command{Use: "root", Run: emptyRun} - child1Cmd := &Command{ - Use: "child", - Run: func(*Command, []string) { childUsed = 1 }, - } - child2Cmd := &Command{ - Use: "child", - Run: func(*Command, []string) { childUsed = 2 }, - } - rootCmd.AddCommand(child1Cmd) - rootCmd.RemoveCommand(child1Cmd) - rootCmd.AddCommand(child2Cmd) - - output, err := executeCommand(rootCmd, "child") - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if childUsed == 1 { - t.Error("Removed command shouldn't be called") - } - if childUsed != 2 { - t.Error("Replacing command should have been called but didn't") - } -} - -func TestDeprecatedCommand(t *testing.T) { - rootCmd := &Command{Use: "root", Run: emptyRun} - deprecatedCmd := &Command{ - Use: "deprecated", - Deprecated: "This command is deprecated", - Run: emptyRun, - } - rootCmd.AddCommand(deprecatedCmd) - - output, err := executeCommand(rootCmd, "deprecated") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - checkStringContains(t, output, deprecatedCmd.Deprecated) -} - -func TestHooks(t *testing.T) { - var ( - persPreArgs string - preArgs string - runArgs string - postArgs string - persPostArgs string - ) - - c := &Command{ - Use: "c", - PersistentPreRun: func(_ *Command, args []string) { - persPreArgs = strings.Join(args, " ") - }, - PreRun: func(_ *Command, args []string) { - preArgs = strings.Join(args, " ") - }, - Run: func(_ *Command, args []string) { - runArgs = strings.Join(args, " ") - }, - PostRun: func(_ *Command, args []string) { - postArgs = strings.Join(args, " ") - }, - PersistentPostRun: func(_ *Command, args []string) { - persPostArgs = strings.Join(args, " ") - }, - } - - output, err := executeCommand(c, "one", "two") - if 
output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if persPreArgs != "one two" { - t.Errorf("Expected persPreArgs %q, got %q", "one two", persPreArgs) - } - if preArgs != "one two" { - t.Errorf("Expected preArgs %q, got %q", "one two", preArgs) - } - if runArgs != "one two" { - t.Errorf("Expected runArgs %q, got %q", "one two", runArgs) - } - if postArgs != "one two" { - t.Errorf("Expected postArgs %q, got %q", "one two", postArgs) - } - if persPostArgs != "one two" { - t.Errorf("Expected persPostArgs %q, got %q", "one two", persPostArgs) - } -} - -func TestPersistentHooks(t *testing.T) { - var ( - parentPersPreArgs string - parentPreArgs string - parentRunArgs string - parentPostArgs string - parentPersPostArgs string - ) - - var ( - childPersPreArgs string - childPreArgs string - childRunArgs string - childPostArgs string - childPersPostArgs string - ) - - parentCmd := &Command{ - Use: "parent", - PersistentPreRun: func(_ *Command, args []string) { - parentPersPreArgs = strings.Join(args, " ") - }, - PreRun: func(_ *Command, args []string) { - parentPreArgs = strings.Join(args, " ") - }, - Run: func(_ *Command, args []string) { - parentRunArgs = strings.Join(args, " ") - }, - PostRun: func(_ *Command, args []string) { - parentPostArgs = strings.Join(args, " ") - }, - PersistentPostRun: func(_ *Command, args []string) { - parentPersPostArgs = strings.Join(args, " ") - }, - } - - childCmd := &Command{ - Use: "child", - PersistentPreRun: func(_ *Command, args []string) { - childPersPreArgs = strings.Join(args, " ") - }, - PreRun: func(_ *Command, args []string) { - childPreArgs = strings.Join(args, " ") - }, - Run: func(_ *Command, args []string) { - childRunArgs = strings.Join(args, " ") - }, - PostRun: func(_ *Command, args []string) { - childPostArgs = strings.Join(args, " ") - }, - PersistentPostRun: func(_ *Command, args []string) { - childPersPostArgs = strings.Join(args, " ") - }, - } - parentCmd.AddCommand(childCmd) - - output, err := executeCommand(parentCmd, "child", "one", "two") - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - // TODO: This test fails, but should not. - // Related to https://github.com/spf13/cobra/issues/252. - // - // if parentPersPreArgs != "one two" { - // t.Errorf("Expected parentPersPreArgs %q, got %q", "one two", parentPersPreArgs) - // } - if parentPreArgs != "" { - t.Errorf("Expected blank parentPreArgs, got %q", parentPreArgs) - } - if parentRunArgs != "" { - t.Errorf("Expected blank parentRunArgs, got %q", parentRunArgs) - } - if parentPostArgs != "" { - t.Errorf("Expected blank parentPostArgs, got %q", parentPostArgs) - } - // TODO: This test fails, but should not. - // Related to https://github.com/spf13/cobra/issues/252. 
- // - // if parentPersPostArgs != "one two" { - // t.Errorf("Expected parentPersPostArgs %q, got %q", "one two", parentPersPostArgs) - // } - - if childPersPreArgs != "one two" { - t.Errorf("Expected childPersPreArgs %q, got %q", "one two", childPersPreArgs) - } - if childPreArgs != "one two" { - t.Errorf("Expected childPreArgs %q, got %q", "one two", childPreArgs) - } - if childRunArgs != "one two" { - t.Errorf("Expected childRunArgs %q, got %q", "one two", childRunArgs) - } - if childPostArgs != "one two" { - t.Errorf("Expected childPostArgs %q, got %q", "one two", childPostArgs) - } - if childPersPostArgs != "one two" { - t.Errorf("Expected childPersPostArgs %q, got %q", "one two", childPersPostArgs) - } -} - -// Related to https://github.com/spf13/cobra/issues/521. -func TestGlobalNormFuncPropagation(t *testing.T) { - normFunc := func(f *pflag.FlagSet, name string) pflag.NormalizedName { - return pflag.NormalizedName(name) - } - - rootCmd := &Command{Use: "root", Run: emptyRun} - childCmd := &Command{Use: "child", Run: emptyRun} - rootCmd.AddCommand(childCmd) - - rootCmd.SetGlobalNormalizationFunc(normFunc) - if reflect.ValueOf(normFunc).Pointer() != reflect.ValueOf(rootCmd.GlobalNormalizationFunc()).Pointer() { - t.Error("rootCmd seems to have a wrong normalization function") - } - - if reflect.ValueOf(normFunc).Pointer() != reflect.ValueOf(childCmd.GlobalNormalizationFunc()).Pointer() { - t.Error("childCmd should have had the normalization function of rootCmd") - } -} - -// Related to https://github.com/spf13/cobra/issues/521. -func TestNormPassedOnLocal(t *testing.T) { - toUpper := func(f *pflag.FlagSet, name string) pflag.NormalizedName { - return pflag.NormalizedName(strings.ToUpper(name)) - } - - c := &Command{} - c.Flags().Bool("flagname", true, "this is a dummy flag") - c.SetGlobalNormalizationFunc(toUpper) - if c.LocalFlags().Lookup("flagname") != c.LocalFlags().Lookup("FLAGNAME") { - t.Error("Normalization function should be passed on to Local flag set") - } -} - -// Related to https://github.com/spf13/cobra/issues/521. -func TestNormPassedOnInherited(t *testing.T) { - toUpper := func(f *pflag.FlagSet, name string) pflag.NormalizedName { - return pflag.NormalizedName(strings.ToUpper(name)) - } - - c := &Command{} - c.SetGlobalNormalizationFunc(toUpper) - - child1 := &Command{} - c.AddCommand(child1) - - c.PersistentFlags().Bool("flagname", true, "") - - child2 := &Command{} - c.AddCommand(child2) - - inherited := child1.InheritedFlags() - if inherited.Lookup("flagname") == nil || inherited.Lookup("flagname") != inherited.Lookup("FLAGNAME") { - t.Error("Normalization function should be passed on to inherited flag set in command added before flag") - } - - inherited = child2.InheritedFlags() - if inherited.Lookup("flagname") == nil || inherited.Lookup("flagname") != inherited.Lookup("FLAGNAME") { - t.Error("Normalization function should be passed on to inherited flag set in command added after flag") - } -} - -// Related to https://github.com/spf13/cobra/issues/521. 
-func TestConsistentNormalizedName(t *testing.T) { - toUpper := func(f *pflag.FlagSet, name string) pflag.NormalizedName { - return pflag.NormalizedName(strings.ToUpper(name)) - } - n := func(f *pflag.FlagSet, name string) pflag.NormalizedName { - return pflag.NormalizedName(name) - } - - c := &Command{} - c.Flags().Bool("flagname", true, "") - c.SetGlobalNormalizationFunc(toUpper) - c.SetGlobalNormalizationFunc(n) - - if c.LocalFlags().Lookup("flagname") == c.LocalFlags().Lookup("FLAGNAME") { - t.Error("Normalizing flag names should not result in duplicate flags") - } -} - -func TestFlagOnPflagCommandLine(t *testing.T) { - flagName := "flagOnCommandLine" - pflag.String(flagName, "", "about my flag") - - c := &Command{Use: "c", Run: emptyRun} - c.AddCommand(&Command{Use: "child", Run: emptyRun}) - - output, _ := executeCommand(c, "--help") - checkStringContains(t, output, flagName) - - resetCommandLineFlagSet() -} - -// TestHiddenCommandExecutes checks, -// if hidden commands run as intended. -func TestHiddenCommandExecutes(t *testing.T) { - executed := false - c := &Command{ - Use: "c", - Hidden: true, - Run: func(*Command, []string) { executed = true }, - } - - output, err := executeCommand(c) - if output != "" { - t.Errorf("Unexpected output: %v", output) - } - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if !executed { - t.Error("Hidden command should have been executed") - } -} - -// test to ensure hidden commands do not show up in usage/help text -func TestHiddenCommandIsHidden(t *testing.T) { - c := &Command{Use: "c", Hidden: true, Run: emptyRun} - if c.IsAvailableCommand() { - t.Errorf("Hidden command should be unavailable") - } -} - -func TestCommandsAreSorted(t *testing.T) { - EnableCommandSorting = true - - originalNames := []string{"middle", "zlast", "afirst"} - expectedNames := []string{"afirst", "middle", "zlast"} - - var rootCmd = &Command{Use: "root"} - - for _, name := range originalNames { - rootCmd.AddCommand(&Command{Use: name}) - } - - for i, c := range rootCmd.Commands() { - got := c.Name() - if expectedNames[i] != got { - t.Errorf("Expected: %s, got: %s", expectedNames[i], got) - } - } - - EnableCommandSorting = true -} - -func TestEnableCommandSortingIsDisabled(t *testing.T) { - EnableCommandSorting = false - - originalNames := []string{"middle", "zlast", "afirst"} - - var rootCmd = &Command{Use: "root"} - - for _, name := range originalNames { - rootCmd.AddCommand(&Command{Use: name}) - } - - for i, c := range rootCmd.Commands() { - got := c.Name() - if originalNames[i] != got { - t.Errorf("expected: %s, got: %s", originalNames[i], got) - } - } - - EnableCommandSorting = true -} - -func TestSetOutput(t *testing.T) { - c := &Command{} - c.SetOutput(nil) - if out := c.OutOrStdout(); out != os.Stdout { - t.Errorf("Expected setting output to nil to revert back to stdout") - } -} - -func TestFlagErrorFunc(t *testing.T) { - c := &Command{Use: "c", Run: emptyRun} - - expectedFmt := "This is expected: %v" - c.SetFlagErrorFunc(func(_ *Command, err error) error { - return fmt.Errorf(expectedFmt, err) - }) - - _, err := executeCommand(c, "--unknown-flag") - - got := err.Error() - expected := fmt.Sprintf(expectedFmt, "unknown flag: --unknown-flag") - if got != expected { - t.Errorf("Expected %v, got %v", expected, got) - } -} - -// TestSortedFlags checks, -// if cmd.LocalFlags() is unsorted when cmd.Flags().SortFlags set to false. -// Related to https://github.com/spf13/cobra/issues/404. 
-func TestSortedFlags(t *testing.T) { - c := &Command{} - c.Flags().SortFlags = false - names := []string{"C", "B", "A", "D"} - for _, name := range names { - c.Flags().Bool(name, false, "") - } - - i := 0 - c.LocalFlags().VisitAll(func(f *pflag.Flag) { - if i == len(names) { - return - } - if stringInSlice(f.Name, names) { - if names[i] != f.Name { - t.Errorf("Incorrect order. Expected %v, got %v", names[i], f.Name) - } - i++ - } - }) -} - -// TestMergeCommandLineToFlags checks, -// if pflag.CommandLine is correctly merged to c.Flags() after first call -// of c.mergePersistentFlags. -// Related to https://github.com/spf13/cobra/issues/443. -func TestMergeCommandLineToFlags(t *testing.T) { - pflag.Bool("boolflag", false, "") - c := &Command{Use: "c", Run: emptyRun} - c.mergePersistentFlags() - if c.Flags().Lookup("boolflag") == nil { - t.Fatal("Expecting to have flag from CommandLine in c.Flags()") - } - - resetCommandLineFlagSet() -} - -// TestUseDeprecatedFlags checks, -// if cobra.Execute() prints a message, if a deprecated flag is used. -// Related to https://github.com/spf13/cobra/issues/463. -func TestUseDeprecatedFlags(t *testing.T) { - c := &Command{Use: "c", Run: emptyRun} - c.Flags().BoolP("deprecated", "d", false, "deprecated flag") - c.Flags().MarkDeprecated("deprecated", "This flag is deprecated") - - output, err := executeCommand(c, "c", "-d") - if err != nil { - t.Error("Unexpected error:", err) - } - checkStringContains(t, output, "This flag is deprecated") -} - -func TestTraverseWithParentFlags(t *testing.T) { - rootCmd := &Command{Use: "root", TraverseChildren: true} - rootCmd.Flags().String("str", "", "") - rootCmd.Flags().BoolP("bool", "b", false, "") - - childCmd := &Command{Use: "child"} - childCmd.Flags().Int("int", -1, "") - - rootCmd.AddCommand(childCmd) - - c, args, err := rootCmd.Traverse([]string{"-b", "--str", "ok", "child", "--int"}) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - if len(args) != 1 && args[0] != "--add" { - t.Errorf("Wrong args: %v", args) - } - if c.Name() != childCmd.Name() { - t.Errorf("Expected command: %q, got: %q", childCmd.Name(), c.Name()) - } -} - -func TestTraverseNoParentFlags(t *testing.T) { - rootCmd := &Command{Use: "root", TraverseChildren: true} - rootCmd.Flags().String("foo", "", "foo things") - - childCmd := &Command{Use: "child"} - childCmd.Flags().String("str", "", "") - rootCmd.AddCommand(childCmd) - - c, args, err := rootCmd.Traverse([]string{"child"}) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - if len(args) != 0 { - t.Errorf("Wrong args %v", args) - } - if c.Name() != childCmd.Name() { - t.Errorf("Expected command: %q, got: %q", childCmd.Name(), c.Name()) - } -} - -func TestTraverseWithBadParentFlags(t *testing.T) { - rootCmd := &Command{Use: "root", TraverseChildren: true} - - childCmd := &Command{Use: "child"} - childCmd.Flags().String("str", "", "") - rootCmd.AddCommand(childCmd) - - expected := "unknown flag: --str" - - c, _, err := rootCmd.Traverse([]string{"--str", "ok", "child"}) - if err == nil || !strings.Contains(err.Error(), expected) { - t.Errorf("Expected error, %q, got %q", expected, err) - } - if c != nil { - t.Errorf("Expected nil command") - } -} - -func TestTraverseWithBadChildFlag(t *testing.T) { - rootCmd := &Command{Use: "root", TraverseChildren: true} - rootCmd.Flags().String("str", "", "") - - childCmd := &Command{Use: "child"} - rootCmd.AddCommand(childCmd) - - // Expect no error because the last commands args shouldn't be parsed in - // Traverse. 
- c, args, err := rootCmd.Traverse([]string{"child", "--str"}) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - if len(args) != 1 && args[0] != "--str" { - t.Errorf("Wrong args: %v", args) - } - if c.Name() != childCmd.Name() { - t.Errorf("Expected command %q, got: %q", childCmd.Name(), c.Name()) - } -} - -func TestTraverseWithTwoSubcommands(t *testing.T) { - rootCmd := &Command{Use: "root", TraverseChildren: true} - - subCmd := &Command{Use: "sub", TraverseChildren: true} - rootCmd.AddCommand(subCmd) - - subsubCmd := &Command{ - Use: "subsub", - } - subCmd.AddCommand(subsubCmd) - - c, _, err := rootCmd.Traverse([]string{"sub", "subsub"}) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if c.Name() != subsubCmd.Name() { - t.Fatalf("Expected command: %q, got %q", subsubCmd.Name(), c.Name()) - } -} - -// TestUpdateName checks if c.Name() updates on changed c.Use. -// Related to https://github.com/spf13/cobra/pull/422#discussion_r143918343. -func TestUpdateName(t *testing.T) { - c := &Command{Use: "name xyz"} - originalName := c.Name() - - c.Use = "changedName abc" - if originalName == c.Name() || c.Name() != "changedName" { - t.Error("c.Name() should be updated on changed c.Use") - } -} - -type calledAsTestcase struct { - args []string - call string - want string - epm bool - tc bool -} - -func (tc *calledAsTestcase) test(t *testing.T) { - defer func(ov bool) { EnablePrefixMatching = ov }(EnablePrefixMatching) - EnablePrefixMatching = tc.epm - - var called *Command - run := func(c *Command, _ []string) { t.Logf("called: %q", c.Name()); called = c } - - parent := &Command{Use: "parent", Run: run} - child1 := &Command{Use: "child1", Run: run, Aliases: []string{"this"}} - child2 := &Command{Use: "child2", Run: run, Aliases: []string{"that"}} - - parent.AddCommand(child1) - parent.AddCommand(child2) - parent.SetArgs(tc.args) - - output := new(bytes.Buffer) - parent.SetOutput(output) - - parent.Execute() - - if called == nil { - if tc.call != "" { - t.Errorf("missing expected call to command: %s", tc.call) - } - return - } - - if called.Name() != tc.call { - t.Errorf("called command == %q; Wanted %q", called.Name(), tc.call) - } else if got := called.CalledAs(); got != tc.want { - t.Errorf("%s.CalledAs() == %q; Wanted: %q", tc.call, got, tc.want) - } -} - -func TestCalledAs(t *testing.T) { - tests := map[string]calledAsTestcase{ - "find/no-args": {nil, "parent", "parent", false, false}, - "find/real-name": {[]string{"child1"}, "child1", "child1", false, false}, - "find/full-alias": {[]string{"that"}, "child2", "that", false, false}, - "find/part-no-prefix": {[]string{"thi"}, "", "", false, false}, - "find/part-alias": {[]string{"thi"}, "child1", "this", true, false}, - "find/conflict": {[]string{"th"}, "", "", true, false}, - "traverse/no-args": {nil, "parent", "parent", false, true}, - "traverse/real-name": {[]string{"child1"}, "child1", "child1", false, true}, - "traverse/full-alias": {[]string{"that"}, "child2", "that", false, true}, - "traverse/part-no-prefix": {[]string{"thi"}, "", "", false, true}, - "traverse/part-alias": {[]string{"thi"}, "child1", "this", true, true}, - "traverse/conflict": {[]string{"th"}, "", "", true, true}, - } - - for name, tc := range tests { - t.Run(name, tc.test) - } -} - -func TestFParseErrWhitelistBackwardCompatibility(t *testing.T) { - c := &Command{Use: "c", Run: emptyRun} - c.Flags().BoolP("boola", "a", false, "a boolean flag") - - output, err := executeCommand(c, "c", "-a", "--unknown", "flag") - if err == nil { - 
t.Error("expected unknown flag error") - } - checkStringContains(t, output, "unknown flag: --unknown") -} - -func TestFParseErrWhitelistSameCommand(t *testing.T) { - c := &Command{ - Use: "c", - Run: emptyRun, - FParseErrWhitelist: FParseErrWhitelist{ - UnknownFlags: true, - }, - } - c.Flags().BoolP("boola", "a", false, "a boolean flag") - - _, err := executeCommand(c, "c", "-a", "--unknown", "flag") - if err != nil { - t.Error("unexpected error: ", err) - } -} - -func TestFParseErrWhitelistParentCommand(t *testing.T) { - root := &Command{ - Use: "root", - Run: emptyRun, - FParseErrWhitelist: FParseErrWhitelist{ - UnknownFlags: true, - }, - } - - c := &Command{ - Use: "child", - Run: emptyRun, - } - c.Flags().BoolP("boola", "a", false, "a boolean flag") - - root.AddCommand(c) - - output, err := executeCommand(root, "child", "-a", "--unknown", "flag") - if err == nil { - t.Error("expected unknown flag error") - } - checkStringContains(t, output, "unknown flag: --unknown") -} - -func TestFParseErrWhitelistChildCommand(t *testing.T) { - root := &Command{ - Use: "root", - Run: emptyRun, - } - - c := &Command{ - Use: "child", - Run: emptyRun, - FParseErrWhitelist: FParseErrWhitelist{ - UnknownFlags: true, - }, - } - c.Flags().BoolP("boola", "a", false, "a boolean flag") - - root.AddCommand(c) - - _, err := executeCommand(root, "child", "-a", "--unknown", "flag") - if err != nil { - t.Error("unexpected error: ", err.Error()) - } -} - -func TestFParseErrWhitelistSiblingCommand(t *testing.T) { - root := &Command{ - Use: "root", - Run: emptyRun, - } - - c := &Command{ - Use: "child", - Run: emptyRun, - FParseErrWhitelist: FParseErrWhitelist{ - UnknownFlags: true, - }, - } - c.Flags().BoolP("boola", "a", false, "a boolean flag") - - s := &Command{ - Use: "sibling", - Run: emptyRun, - } - s.Flags().BoolP("boolb", "b", false, "a boolean flag") - - root.AddCommand(c) - root.AddCommand(s) - - output, err := executeCommand(root, "sibling", "-b", "--unknown", "flag") - if err == nil { - t.Error("expected unknown flag error") - } - checkStringContains(t, output, "unknown flag: --unknown") -} diff --git a/vendor/github.com/spf13/cobra/zsh_completions_test.go b/vendor/github.com/spf13/cobra/zsh_completions_test.go deleted file mode 100644 index 34e69496..00000000 --- a/vendor/github.com/spf13/cobra/zsh_completions_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package cobra - -import ( - "bytes" - "strings" - "testing" -) - -func TestZshCompletion(t *testing.T) { - tcs := []struct { - name string - root *Command - expectedExpressions []string - }{ - { - name: "trivial", - root: &Command{Use: "trivialapp"}, - expectedExpressions: []string{"#compdef trivial"}, - }, - { - name: "linear", - root: func() *Command { - r := &Command{Use: "linear"} - - sub1 := &Command{Use: "sub1"} - r.AddCommand(sub1) - - sub2 := &Command{Use: "sub2"} - sub1.AddCommand(sub2) - - sub3 := &Command{Use: "sub3"} - sub2.AddCommand(sub3) - return r - }(), - expectedExpressions: []string{"sub1", "sub2", "sub3"}, - }, - { - name: "flat", - root: func() *Command { - r := &Command{Use: "flat"} - r.AddCommand(&Command{Use: "c1"}) - r.AddCommand(&Command{Use: "c2"}) - return r - }(), - expectedExpressions: []string{"(c1 c2)"}, - }, - { - name: "tree", - root: func() *Command { - r := &Command{Use: "tree"} - - sub1 := &Command{Use: "sub1"} - r.AddCommand(sub1) - - sub11 := &Command{Use: "sub11"} - sub12 := &Command{Use: "sub12"} - - sub1.AddCommand(sub11) - sub1.AddCommand(sub12) - - sub2 := &Command{Use: "sub2"} - r.AddCommand(sub2) - - sub21 := 
&Command{Use: "sub21"} - sub22 := &Command{Use: "sub22"} - - sub2.AddCommand(sub21) - sub2.AddCommand(sub22) - - return r - }(), - expectedExpressions: []string{"(sub11 sub12)", "(sub21 sub22)"}, - }, - } - - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - buf := new(bytes.Buffer) - tc.root.GenZshCompletion(buf) - output := buf.String() - - for _, expectedExpression := range tc.expectedExpressions { - if !strings.Contains(output, expectedExpression) { - t.Errorf("Expected completion to contain %q somewhere; got %q", expectedExpression, output) - } - } - }) - } -} diff --git a/vendor/github.com/spf13/pflag/.gitignore b/vendor/github.com/spf13/pflag/.gitignore deleted file mode 100644 index c3da2901..00000000 --- a/vendor/github.com/spf13/pflag/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.idea/* - diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml deleted file mode 100644 index f8a63b30..00000000 --- a/vendor/github.com/spf13/pflag/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -sudo: false - -language: go - -go: - - 1.7.3 - - 1.8.1 - - tip - -matrix: - allow_failures: - - go: tip - -install: - - go get github.com/golang/lint/golint - - export PATH=$GOPATH/bin:$PATH - - go install ./... - -script: - - verify/all.sh -v - - go test ./... diff --git a/vendor/github.com/spf13/pflag/BUILD.bazel b/vendor/github.com/spf13/pflag/BUILD.bazel deleted file mode 100644 index 05577796..00000000 --- a/vendor/github.com/spf13/pflag/BUILD.bazel +++ /dev/null @@ -1,69 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "bool.go", - "bool_slice.go", - "bytes.go", - "count.go", - "duration.go", - "duration_slice.go", - "flag.go", - "float32.go", - "float64.go", - "golangflag.go", - "int.go", - "int16.go", - "int32.go", - "int64.go", - "int8.go", - "int_slice.go", - "ip.go", - "ip_slice.go", - "ipmask.go", - "ipnet.go", - "string.go", - "string_array.go", - "string_slice.go", - "uint.go", - "uint16.go", - "uint32.go", - "uint64.go", - "uint8.go", - "uint_slice.go", - ], - importpath = "github.com/spf13/pflag", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = [ - "bool_slice_test.go", - "bool_test.go", - "bytes_test.go", - "count_test.go", - "duration_slice_test.go", - "export_test.go", - "flag_test.go", - "golangflag_test.go", - "int_slice_test.go", - "ip_slice_test.go", - "ip_test.go", - "ipnet_test.go", - "printusage_test.go", - "string_array_test.go", - "string_slice_test.go", - "uint_slice_test.go", - ], - embed = [":go_default_library"], - importpath = "github.com/spf13/pflag", -) - -go_test( - name = "go_default_xtest", - srcs = ["example_test.go"], - importpath = "github.com/spf13/pflag_test", - deps = [":go_default_library"], -) diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md deleted file mode 100644 index b052414d..00000000 --- a/vendor/github.com/spf13/pflag/README.md +++ /dev/null @@ -1,296 +0,0 @@ -[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag) -[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag) - -## Description - -pflag is a drop-in replacement for Go's flag package, implementing -POSIX/GNU-style --flags. 
- -pflag is compatible with the [GNU extensions to the POSIX recommendations -for command-line options][1]. For a more precise description, see the -"Command-line flag syntax" section below. - -[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - -pflag is available under the same style of BSD license as the Go language, -which can be found in the LICENSE file. - -## Installation - -pflag is available using the standard `go get` command. - -Install by running: - - go get github.com/spf13/pflag - -Run tests by running: - - go test github.com/spf13/pflag - -## Usage - -pflag is a drop-in replacement of Go's native flag package. If you import -pflag under the name "flag" then all code should continue to function -with no changes. - -``` go -import flag "github.com/spf13/pflag" -``` - -There is one exception to this: if you directly instantiate the Flag struct -there is one more field "Shorthand" that you will need to set. -Most code never instantiates this struct directly, and instead uses -functions such as String(), BoolVar(), and Var(), and is therefore -unaffected. - -Define flags using flag.String(), Bool(), Int(), etc. - -This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - -``` go -var ip *int = flag.Int("flagname", 1234, "help message for flagname") -``` - -If you like, you can bind the flag to a variable using the Var() functions. - -``` go -var flagvar int -func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") -} -``` - -Or you can create custom flags that satisfy the Value interface (with -pointer receivers) and couple them to flag parsing by - -``` go -flag.Var(&flagVal, "name", "help message for flagname") -``` - -For such flags, the default value is just the initial value of the variable. - -After all flags are defined, call - -``` go -flag.Parse() -``` - -to parse the command line into the defined flags. - -Flags may then be used directly. If you're using the flags themselves, -they are all pointers; if you bind to variables, they're values. - -``` go -fmt.Println("ip has value ", *ip) -fmt.Println("flagvar has value ", flagvar) -``` - -There are helpers function to get values later if you have the FlagSet but -it was difficult to keep up with all of the flag pointers in your code. -If you have a pflag.FlagSet with a flag called 'flagname' of type int you -can use GetInt() to get the int value. But notice that 'flagname' must exist -and it must be an int. GetString("flagname") will fail. - -``` go -i, err := flagset.GetInt("flagname") -``` - -After parsing, the arguments after the flag are available as the -slice flag.Args() or individually as flag.Arg(i). -The arguments are indexed from 0 through flag.NArg()-1. - -The pflag package also defines some new functions that are not in flag, -that give one-letter shorthands for flags. You can use these by appending -'P' to the name of any function that defines a flag. - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -var flagvar bool -func init() { - flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") -} -flag.VarP(&flagVal, "varname", "v", "help message") -``` - -Shorthand letters can be used with single dashes on the command line. -Boolean shorthand flags can be combined with other shorthand flags. - -The default set of command-line flags is controlled by -top-level functions. The FlagSet type allows one to define -independent sets of flags, such as to implement subcommands -in a command-line interface. 
The methods of FlagSet are -analogous to the top-level functions for the command-line -flag set. - -## Setting no option default values for flags - -After you create a flag it is possible to set the pflag.NoOptDefVal for -the given flag. Doing this changes the meaning of the flag slightly. If -a flag has a NoOptDefVal and the flag is set on the command line without -an option the flag will be set to the NoOptDefVal. For example given: - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -flag.Lookup("flagname").NoOptDefVal = "4321" -``` - -Would result in something like - -| Parsed Arguments | Resulting Value | -| ------------- | ------------- | -| --flagname=1357 | ip=1357 | -| --flagname | ip=4321 | -| [nothing] | ip=1234 | - -## Command line flag syntax - -``` ---flag // boolean flags, or flags with no option default values ---flag x // only on flags without a default value ---flag=x -``` - -Unlike the flag package, a single dash before an option means something -different than a double dash. Single dashes signify a series of shorthand -letters for flags. All but the last shorthand letter must be boolean flags -or a flag with a default value - -``` -// boolean or flags where the 'no option default value' is set --f --f=true --abc -but --b true is INVALID - -// non-boolean and flags without a 'no option default value' --n 1234 --n=1234 --n1234 - -// mixed --abcs "hello" --absd="hello" --abcs1234 -``` - -Flag parsing stops after the terminator "--". Unlike the flag package, -flags can be interspersed with arguments anywhere on the command line -before this terminator. - -Integer flags accept 1234, 0664, 0x1234 and may be negative. -Boolean flags (in their long form) accept 1, 0, t, f, true, false, -TRUE, FALSE, True, False. -Duration flags accept any input valid for time.ParseDuration. - -## Mutating or "Normalizing" Flag names - -It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow. - -**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag - -``` go -func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - from := []string{"-", "_"} - to := "." - for _, sep := range from { - name = strings.Replace(name, sep, to, -1) - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc) -``` - -**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name - -``` go -func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - switch name { - case "old-flag-name": - name = "new-flag-name" - break - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(aliasNormalizeFunc) -``` - -## Deprecating a flag or its shorthand -It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used. - -**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead. 
-```go -// deprecate a flag by specifying its name and a usage message -flags.MarkDeprecated("badflag", "please use --good-flag instead") -``` -This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used. - -**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n". -```go -// deprecate a flag shorthand by specifying its flag name and a usage message -flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only") -``` -This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used. - -Note that usage message is essential here, and it should not be empty. - -## Hidden flags -It is possible to mark a flag as hidden, meaning it will still function as normal, however will not show up in usage/help text. - -**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available. -```go -// hide a flag by specifying its name -flags.MarkHidden("secretFlag") -``` - -## Disable sorting of flags -`pflag` allows you to disable sorting of flags for help and usage message. - -**Example**: -```go -flags.BoolP("verbose", "v", false, "verbose output") -flags.String("coolflag", "yeaah", "it's really cool flag") -flags.Int("usefulflag", 777, "sometimes it's very useful") -flags.SortFlags = false -flags.PrintDefaults() -``` -**Output**: -``` - -v, --verbose verbose output - --coolflag string it's really cool flag (default "yeaah") - --usefulflag int sometimes it's very useful (default 777) -``` - - -## Supporting Go flags when using pflag -In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary -to support flags defined by third-party dependencies (e.g. `golang/glog`). - -**Example**: You want to add the Go flags to the `CommandLine` flagset -```go -import ( - goflag "flag" - flag "github.com/spf13/pflag" -) - -var ip *int = flag.Int("flagname", 1234, "help message for flagname") - -func main() { - flag.CommandLine.AddGoFlagSet(goflag.CommandLine) - flag.Parse() -} -``` - -## More info - -You can see the full reference documentation of the pflag package -[at godoc.org][3], or through go's standard documentation system by -running `godoc -http=:6060` and browsing to -[http://localhost:6060/pkg/github.com/spf13/pflag][2] after -installation. 
- -[2]: http://localhost:6060/pkg/github.com/spf13/pflag -[3]: http://godoc.org/github.com/spf13/pflag diff --git a/vendor/github.com/spf13/pflag/bool_slice_test.go b/vendor/github.com/spf13/pflag/bool_slice_test.go deleted file mode 100644 index b617dd23..00000000 --- a/vendor/github.com/spf13/pflag/bool_slice_test.go +++ /dev/null @@ -1,215 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" - "testing" -) - -func setUpBSFlagSet(bsp *[]bool) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - f.BoolSliceVar(bsp, "bs", []bool{}, "Command separated list!") - return f -} - -func setUpBSFlagSetWithDefault(bsp *[]bool) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - f.BoolSliceVar(bsp, "bs", []bool{false, true}, "Command separated list!") - return f -} - -func TestEmptyBS(t *testing.T) { - var bs []bool - f := setUpBSFlagSet(&bs) - err := f.Parse([]string{}) - if err != nil { - t.Fatal("expected no error; got", err) - } - - getBS, err := f.GetBoolSlice("bs") - if err != nil { - t.Fatal("got an error from GetBoolSlice():", err) - } - if len(getBS) != 0 { - t.Fatalf("got bs %v with len=%d but expected length=0", getBS, len(getBS)) - } -} - -func TestBS(t *testing.T) { - var bs []bool - f := setUpBSFlagSet(&bs) - - vals := []string{"1", "F", "TRUE", "0"} - arg := fmt.Sprintf("--bs=%s", strings.Join(vals, ",")) - err := f.Parse([]string{arg}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range bs { - b, err := strconv.ParseBool(vals[i]) - if err != nil { - t.Fatalf("got error: %v", err) - } - if b != v { - t.Fatalf("expected is[%d] to be %s but got: %t", i, vals[i], v) - } - } - getBS, err := f.GetBoolSlice("bs") - if err != nil { - t.Fatalf("got error: %v", err) - } - for i, v := range getBS { - b, err := strconv.ParseBool(vals[i]) - if err != nil { - t.Fatalf("got error: %v", err) - } - if b != v { - t.Fatalf("expected bs[%d] to be %s but got: %t from GetBoolSlice", i, vals[i], v) - } - } -} - -func TestBSDefault(t *testing.T) { - var bs []bool - f := setUpBSFlagSetWithDefault(&bs) - - vals := []string{"false", "T"} - - err := f.Parse([]string{}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range bs { - b, err := strconv.ParseBool(vals[i]) - if err != nil { - t.Fatalf("got error: %v", err) - } - if b != v { - t.Fatalf("expected bs[%d] to be %t from GetBoolSlice but got: %t", i, b, v) - } - } - - getBS, err := f.GetBoolSlice("bs") - if err != nil { - t.Fatal("got an error from GetBoolSlice():", err) - } - for i, v := range getBS { - b, err := strconv.ParseBool(vals[i]) - if err != nil { - t.Fatal("got an error from GetBoolSlice():", err) - } - if b != v { - t.Fatalf("expected bs[%d] to be %t from GetBoolSlice but got: %t", i, b, v) - } - } -} - -func TestBSWithDefault(t *testing.T) { - var bs []bool - f := setUpBSFlagSetWithDefault(&bs) - - vals := []string{"FALSE", "1"} - arg := fmt.Sprintf("--bs=%s", strings.Join(vals, ",")) - err := f.Parse([]string{arg}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range bs { - b, err := strconv.ParseBool(vals[i]) - if err != nil { - t.Fatalf("got error: %v", err) - } - if b != v { - t.Fatalf("expected bs[%d] to be %t but got: %t", i, b, v) - } - } - - getBS, err := f.GetBoolSlice("bs") - if err != nil { - t.Fatal("got an error from GetBoolSlice():", err) - } - for i, v := range getBS { - b, err := strconv.ParseBool(vals[i]) - if err != nil { - t.Fatalf("got error: %v", err) - } - if b != v { - t.Fatalf("expected bs[%d] to be %t from 
GetBoolSlice but got: %t", i, b, v) - } - } -} - -func TestBSCalledTwice(t *testing.T) { - var bs []bool - f := setUpBSFlagSet(&bs) - - in := []string{"T,F", "T"} - expected := []bool{true, false, true} - argfmt := "--bs=%s" - arg1 := fmt.Sprintf(argfmt, in[0]) - arg2 := fmt.Sprintf(argfmt, in[1]) - err := f.Parse([]string{arg1, arg2}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range bs { - if expected[i] != v { - t.Fatalf("expected bs[%d] to be %t but got %t", i, expected[i], v) - } - } -} - -func TestBSBadQuoting(t *testing.T) { - - tests := []struct { - Want []bool - FlagArg []string - }{ - { - Want: []bool{true, false, true}, - FlagArg: []string{"1", "0", "true"}, - }, - { - Want: []bool{true, false}, - FlagArg: []string{"True", "F"}, - }, - { - Want: []bool{true, false}, - FlagArg: []string{"T", "0"}, - }, - { - Want: []bool{true, false}, - FlagArg: []string{"1", "0"}, - }, - { - Want: []bool{true, false, false}, - FlagArg: []string{"true,false", "false"}, - }, - { - Want: []bool{true, false, false, true, false, true, false}, - FlagArg: []string{`"true,false,false,1,0, T"`, " false "}, - }, - { - Want: []bool{false, false, true, false, true, false, true}, - FlagArg: []string{`"0, False, T,false , true,F"`, "true"}, - }, - } - - for i, test := range tests { - - var bs []bool - f := setUpBSFlagSet(&bs) - - if err := f.Parse([]string{fmt.Sprintf("--bs=%s", strings.Join(test.FlagArg, ","))}); err != nil { - t.Fatalf("flag parsing failed with error: %s\nparsing:\t%#v\nwant:\t\t%#v", - err, test.FlagArg, test.Want[i]) - } - - for j, b := range bs { - if b != test.Want[j] { - t.Fatalf("bad value parsed for test %d on bool %d:\nwant:\t%t\ngot:\t%t", i, j, test.Want[j], b) - } - } - } -} diff --git a/vendor/github.com/spf13/pflag/bool_test.go b/vendor/github.com/spf13/pflag/bool_test.go deleted file mode 100644 index a4319e79..00000000 --- a/vendor/github.com/spf13/pflag/bool_test.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package pflag - -import ( - "bytes" - "strconv" - "testing" -) - -// This value can be a boolean ("true", "false") or "maybe" -type triStateValue int - -const ( - triStateFalse triStateValue = 0 - triStateTrue triStateValue = 1 - triStateMaybe triStateValue = 2 -) - -const strTriStateMaybe = "maybe" - -func (v *triStateValue) IsBoolFlag() bool { - return true -} - -func (v *triStateValue) Get() interface{} { - return triStateValue(*v) -} - -func (v *triStateValue) Set(s string) error { - if s == strTriStateMaybe { - *v = triStateMaybe - return nil - } - boolVal, err := strconv.ParseBool(s) - if boolVal { - *v = triStateTrue - } else { - *v = triStateFalse - } - return err -} - -func (v *triStateValue) String() string { - if *v == triStateMaybe { - return strTriStateMaybe - } - return strconv.FormatBool(*v == triStateTrue) -} - -// The type of the flag as required by the pflag.Value interface -func (v *triStateValue) Type() string { - return "version" -} - -func setUpFlagSet(tristate *triStateValue) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - *tristate = triStateFalse - flag := f.VarPF(tristate, "tristate", "t", "tristate value (true, maybe or false)") - flag.NoOptDefVal = "true" - return f -} - -func TestExplicitTrue(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - err := f.Parse([]string{"--tristate=true"}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateTrue { - t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead") - } -} - -func TestImplicitTrue(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - err := f.Parse([]string{"--tristate"}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateTrue { - t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead") - } -} - -func TestShortFlag(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - err := f.Parse([]string{"-t"}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateTrue { - t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead") - } -} - -func TestShortFlagExtraArgument(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - // The"maybe"turns into an arg, since short boolean options will only do true/false - err := f.Parse([]string{"-t", "maybe"}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateTrue { - t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead") - } - args := f.Args() - if len(args) != 1 || args[0] != "maybe" { - t.Fatal("expected an extra 'maybe' argument to stick around") - } -} - -func TestExplicitMaybe(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - err := f.Parse([]string{"--tristate=maybe"}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateMaybe { - t.Fatal("expected", triStateMaybe, "(triStateMaybe) but got", tristate, "instead") - } -} - -func TestExplicitFalse(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - err := f.Parse([]string{"--tristate=false"}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateFalse { - t.Fatal("expected", triStateFalse, "(triStateFalse) but got", tristate, "instead") - } -} - -func TestImplicitFalse(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - err := 
f.Parse([]string{}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateFalse { - t.Fatal("expected", triStateFalse, "(triStateFalse) but got", tristate, "instead") - } -} - -func TestInvalidValue(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - var buf bytes.Buffer - f.SetOutput(&buf) - err := f.Parse([]string{"--tristate=invalid"}) - if err == nil { - t.Fatal("expected an error but did not get any, tristate has value", tristate) - } -} - -func TestBoolP(t *testing.T) { - b := BoolP("bool", "b", false, "bool value in CommandLine") - c := BoolP("c", "c", false, "other bool value") - args := []string{"--bool"} - if err := CommandLine.Parse(args); err != nil { - t.Error("expected no error, got ", err) - } - if *b != true { - t.Errorf("expected b=true got b=%v", *b) - } - if *c != false { - t.Errorf("expect c=false got c=%v", *c) - } -} diff --git a/vendor/github.com/spf13/pflag/bytes.go b/vendor/github.com/spf13/pflag/bytes.go index 12c58db9..67d53045 100644 --- a/vendor/github.com/spf13/pflag/bytes.go +++ b/vendor/github.com/spf13/pflag/bytes.go @@ -1,6 +1,7 @@ package pflag import ( + "encoding/base64" "encoding/hex" "fmt" "strings" @@ -9,10 +10,12 @@ import ( // BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded type bytesHexValue []byte +// String implements pflag.Value.String. func (bytesHex bytesHexValue) String() string { return fmt.Sprintf("%X", []byte(bytesHex)) } +// Set implements pflag.Value.Set. func (bytesHex *bytesHexValue) Set(value string) error { bin, err := hex.DecodeString(strings.TrimSpace(value)) @@ -25,6 +28,7 @@ func (bytesHex *bytesHexValue) Set(value string) error { return nil } +// Type implements pflag.Value.Type. func (*bytesHexValue) Type() string { return "bytesHex" } @@ -103,3 +107,103 @@ func BytesHex(name string, value []byte, usage string) *[]byte { func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte { return CommandLine.BytesHexP(name, shorthand, value, usage) } + +// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded +type bytesBase64Value []byte + +// String implements pflag.Value.String. +func (bytesBase64 bytesBase64Value) String() string { + return base64.StdEncoding.EncodeToString([]byte(bytesBase64)) +} + +// Set implements pflag.Value.Set. +func (bytesBase64 *bytesBase64Value) Set(value string) error { + bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value)) + + if err != nil { + return err + } + + *bytesBase64 = bin + + return nil +} + +// Type implements pflag.Value.Type. +func (*bytesBase64Value) Type() string { + return "bytesBase64" +} + +func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value { + *p = val + return (*bytesBase64Value)(p) +} + +func bytesBase64ValueConv(sval string) (interface{}, error) { + + bin, err := base64.StdEncoding.DecodeString(sval) + if err == nil { + return bin, nil + } + + return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err) +} + +// GetBytesBase64 return the []byte value of a flag with the given name +func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) { + val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv) + + if err != nil { + return []byte{}, err + } + + return val.([]byte), nil +} + +// BytesBase64Var defines an []byte flag with specified name, default value, and usage string. +// The argument p points to an []byte variable in which to store the value of the flag. 
+func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) { + f.VarP(newBytesBase64Value(value, p), name, "", usage) +} + +// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) { + f.VarP(newBytesBase64Value(value, p), name, shorthand, usage) +} + +// BytesBase64Var defines an []byte flag with specified name, default value, and usage string. +// The argument p points to an []byte variable in which to store the value of the flag. +func BytesBase64Var(p *[]byte, name string, value []byte, usage string) { + CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage) +} + +// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash. +func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) { + CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage) +} + +// BytesBase64 defines an []byte flag with specified name, default value, and usage string. +// The return value is the address of an []byte variable that stores the value of the flag. +func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte { + p := new([]byte) + f.BytesBase64VarP(p, name, "", value, usage) + return p +} + +// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte { + p := new([]byte) + f.BytesBase64VarP(p, name, shorthand, value, usage) + return p +} + +// BytesBase64 defines an []byte flag with specified name, default value, and usage string. +// The return value is the address of an []byte variable that stores the value of the flag. +func BytesBase64(name string, value []byte, usage string) *[]byte { + return CommandLine.BytesBase64P(name, "", value, usage) +} + +// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash. +func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte { + return CommandLine.BytesBase64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/bytes_test.go b/vendor/github.com/spf13/pflag/bytes_test.go deleted file mode 100644 index cc4a769d..00000000 --- a/vendor/github.com/spf13/pflag/bytes_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package pflag - -import ( - "fmt" - "os" - "testing" -) - -func setUpBytesHex(bytesHex *[]byte) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - f.BytesHexVar(bytesHex, "bytes", []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}, "Some bytes in HEX") - f.BytesHexVarP(bytesHex, "bytes2", "B", []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}, "Some bytes in HEX") - return f -} - -func TestBytesHex(t *testing.T) { - testCases := []struct { - input string - success bool - expected string - }{ - /// Positive cases - {"", true, ""}, // Is empty string OK ? 
- {"01", true, "01"}, - {"0101", true, "0101"}, - {"1234567890abcdef", true, "1234567890ABCDEF"}, - {"1234567890ABCDEF", true, "1234567890ABCDEF"}, - - // Negative cases - {"0", false, ""}, // Short string - {"000", false, ""}, /// Odd-length string - {"qq", false, ""}, /// non-hex character - } - - devnull, _ := os.Open(os.DevNull) - os.Stderr = devnull - - for i := range testCases { - var bytesHex []byte - f := setUpBytesHex(&bytesHex) - - tc := &testCases[i] - - // --bytes - args := []string{ - fmt.Sprintf("--bytes=%s", tc.input), - fmt.Sprintf("-B %s", tc.input), - fmt.Sprintf("--bytes2=%s", tc.input), - } - - for _, arg := range args { - err := f.Parse([]string{arg}) - - if err != nil && tc.success == true { - t.Errorf("expected success, got %q", err) - continue - } else if err == nil && tc.success == false { - // bytesHex, err := f.GetBytesHex("bytes") - t.Errorf("expected failure while processing %q", tc.input) - continue - } else if tc.success { - bytesHex, err := f.GetBytesHex("bytes") - if err != nil { - t.Errorf("Got error trying to fetch the IP flag: %v", err) - } - if fmt.Sprintf("%X", bytesHex) != tc.expected { - t.Errorf("expected %q, got '%X'", tc.expected, bytesHex) - } - } - } - } -} diff --git a/vendor/github.com/spf13/pflag/count_test.go b/vendor/github.com/spf13/pflag/count_test.go deleted file mode 100644 index 3785d375..00000000 --- a/vendor/github.com/spf13/pflag/count_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package pflag - -import ( - "os" - "testing" -) - -func setUpCount(c *int) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - f.CountVarP(c, "verbose", "v", "a counter") - return f -} - -func TestCount(t *testing.T) { - testCases := []struct { - input []string - success bool - expected int - }{ - {[]string{}, true, 0}, - {[]string{"-v"}, true, 1}, - {[]string{"-vvv"}, true, 3}, - {[]string{"-v", "-v", "-v"}, true, 3}, - {[]string{"-v", "--verbose", "-v"}, true, 3}, - {[]string{"-v=3", "-v"}, true, 4}, - {[]string{"--verbose=0"}, true, 0}, - {[]string{"-v=0"}, true, 0}, - {[]string{"-v=a"}, false, 0}, - } - - devnull, _ := os.Open(os.DevNull) - os.Stderr = devnull - for i := range testCases { - var count int - f := setUpCount(&count) - - tc := &testCases[i] - - err := f.Parse(tc.input) - if err != nil && tc.success == true { - t.Errorf("expected success, got %q", err) - continue - } else if err == nil && tc.success == false { - t.Errorf("expected failure, got success") - continue - } else if tc.success { - c, err := f.GetCount("verbose") - if err != nil { - t.Errorf("Got error trying to fetch the counter flag") - } - if c != tc.expected { - t.Errorf("expected %d, got %d", tc.expected, c) - } - } - } -} diff --git a/vendor/github.com/spf13/pflag/duration_slice_test.go b/vendor/github.com/spf13/pflag/duration_slice_test.go deleted file mode 100644 index 489b012f..00000000 --- a/vendor/github.com/spf13/pflag/duration_slice_test.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code ds governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package pflag - -import ( - "fmt" - "strings" - "testing" - "time" -) - -func setUpDSFlagSet(dsp *[]time.Duration) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - f.DurationSliceVar(dsp, "ds", []time.Duration{}, "Command separated list!") - return f -} - -func setUpDSFlagSetWithDefault(dsp *[]time.Duration) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - f.DurationSliceVar(dsp, "ds", []time.Duration{0, 1}, "Command separated list!") - return f -} - -func TestEmptyDS(t *testing.T) { - var ds []time.Duration - f := setUpDSFlagSet(&ds) - err := f.Parse([]string{}) - if err != nil { - t.Fatal("expected no error; got", err) - } - - getDS, err := f.GetDurationSlice("ds") - if err != nil { - t.Fatal("got an error from GetDurationSlice():", err) - } - if len(getDS) != 0 { - t.Fatalf("got ds %v with len=%d but expected length=0", getDS, len(getDS)) - } -} - -func TestDS(t *testing.T) { - var ds []time.Duration - f := setUpDSFlagSet(&ds) - - vals := []string{"1ns", "2ms", "3m", "4h"} - arg := fmt.Sprintf("--ds=%s", strings.Join(vals, ",")) - err := f.Parse([]string{arg}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range ds { - d, err := time.ParseDuration(vals[i]) - if err != nil { - t.Fatalf("got error: %v", err) - } - if d != v { - t.Fatalf("expected ds[%d] to be %s but got: %d", i, vals[i], v) - } - } - getDS, err := f.GetDurationSlice("ds") - if err != nil { - t.Fatalf("got error: %v", err) - } - for i, v := range getDS { - d, err := time.ParseDuration(vals[i]) - if err != nil { - t.Fatalf("got error: %v", err) - } - if d != v { - t.Fatalf("expected ds[%d] to be %s but got: %d from GetDurationSlice", i, vals[i], v) - } - } -} - -func TestDSDefault(t *testing.T) { - var ds []time.Duration - f := setUpDSFlagSetWithDefault(&ds) - - vals := []string{"0s", "1ns"} - - err := f.Parse([]string{}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range ds { - d, err := time.ParseDuration(vals[i]) - if err != nil { - t.Fatalf("got error: %v", err) - } - if d != v { - t.Fatalf("expected ds[%d] to be %d but got: %d", i, d, v) - } - } - - getDS, err := f.GetDurationSlice("ds") - if err != nil { - t.Fatal("got an error from GetDurationSlice():", err) - } - for i, v := range getDS { - d, err := time.ParseDuration(vals[i]) - if err != nil { - t.Fatal("got an error from GetDurationSlice():", err) - } - if d != v { - t.Fatalf("expected ds[%d] to be %d from GetDurationSlice but got: %d", i, d, v) - } - } -} - -func TestDSWithDefault(t *testing.T) { - var ds []time.Duration - f := setUpDSFlagSetWithDefault(&ds) - - vals := []string{"1ns", "2ns"} - arg := fmt.Sprintf("--ds=%s", strings.Join(vals, ",")) - err := f.Parse([]string{arg}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range ds { - d, err := time.ParseDuration(vals[i]) - if err != nil { - t.Fatalf("got error: %v", err) - } - if d != v { - t.Fatalf("expected ds[%d] to be %d but got: %d", i, d, v) - } - } - - getDS, err := f.GetDurationSlice("ds") - if err != nil { - t.Fatal("got an error from GetDurationSlice():", err) - } - for i, v := range getDS { - d, err := time.ParseDuration(vals[i]) - if err != nil { - t.Fatalf("got error: %v", err) - } - if d != v { - t.Fatalf("expected ds[%d] to be %d from GetDurationSlice but got: %d", i, d, v) - } - } -} - -func TestDSCalledTwice(t *testing.T) { - var ds []time.Duration - f := setUpDSFlagSet(&ds) - - in := []string{"1ns,2ns", "3ns"} - expected := []time.Duration{1, 2, 3} - argfmt := "--ds=%s" - arg1 := 
fmt.Sprintf(argfmt, in[0]) - arg2 := fmt.Sprintf(argfmt, in[1]) - err := f.Parse([]string{arg1, arg2}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range ds { - if expected[i] != v { - t.Fatalf("expected ds[%d] to be %d but got: %d", i, expected[i], v) - } - } -} diff --git a/vendor/github.com/spf13/pflag/example_test.go b/vendor/github.com/spf13/pflag/example_test.go deleted file mode 100644 index abd7806f..00000000 --- a/vendor/github.com/spf13/pflag/example_test.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pflag_test - -import ( - "fmt" - - "github.com/spf13/pflag" -) - -func ExampleShorthandLookup() { - name := "verbose" - short := name[:1] - - pflag.BoolP(name, short, false, "verbose output") - - // len(short) must be == 1 - flag := pflag.ShorthandLookup(short) - - fmt.Println(flag.Name) -} - -func ExampleFlagSet_ShorthandLookup() { - name := "verbose" - short := name[:1] - - fs := pflag.NewFlagSet("Example", pflag.ContinueOnError) - fs.BoolP(name, short, false, "verbose output") - - // len(short) must be == 1 - flag := fs.ShorthandLookup(short) - - fmt.Println(flag.Name) -} diff --git a/vendor/github.com/spf13/pflag/export_test.go b/vendor/github.com/spf13/pflag/export_test.go deleted file mode 100644 index 9318fee0..00000000 --- a/vendor/github.com/spf13/pflag/export_test.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pflag - -import ( - "io/ioutil" - "os" -) - -// Additional routines compiled into the package only during testing. - -// ResetForTesting clears all flag state and sets the usage function as directed. -// After calling ResetForTesting, parse errors in flag handling will not -// exit the program. -func ResetForTesting(usage func()) { - CommandLine = &FlagSet{ - name: os.Args[0], - errorHandling: ContinueOnError, - output: ioutil.Discard, - } - Usage = usage -} - -// GetCommandLine returns the default FlagSet. -func GetCommandLine() *FlagSet { - return CommandLine -} diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index 5eadc84e..5cc710cc 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -990,11 +990,12 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin } func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) { + outArgs = args + if strings.HasPrefix(shorthands, "test.") { return } - outArgs = args outShorts = shorthands[1:] c := shorthands[0] diff --git a/vendor/github.com/spf13/pflag/flag_test.go b/vendor/github.com/spf13/pflag/flag_test.go deleted file mode 100644 index f600f0ae..00000000 --- a/vendor/github.com/spf13/pflag/flag_test.go +++ /dev/null @@ -1,1259 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package pflag - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "reflect" - "sort" - "strconv" - "strings" - "testing" - "time" -) - -var ( - testBool = Bool("test_bool", false, "bool value") - testInt = Int("test_int", 0, "int value") - testInt64 = Int64("test_int64", 0, "int64 value") - testUint = Uint("test_uint", 0, "uint value") - testUint64 = Uint64("test_uint64", 0, "uint64 value") - testString = String("test_string", "0", "string value") - testFloat = Float64("test_float64", 0, "float64 value") - testDuration = Duration("test_duration", 0, "time.Duration value") - testOptionalInt = Int("test_optional_int", 0, "optional int value") - normalizeFlagNameInvocations = 0 -) - -func boolString(s string) string { - if s == "0" { - return "false" - } - return "true" -} - -func TestEverything(t *testing.T) { - m := make(map[string]*Flag) - desired := "0" - visitor := func(f *Flag) { - if len(f.Name) > 5 && f.Name[0:5] == "test_" { - m[f.Name] = f - ok := false - switch { - case f.Value.String() == desired: - ok = true - case f.Name == "test_bool" && f.Value.String() == boolString(desired): - ok = true - case f.Name == "test_duration" && f.Value.String() == desired+"s": - ok = true - } - if !ok { - t.Error("Visit: bad value", f.Value.String(), "for", f.Name) - } - } - } - VisitAll(visitor) - if len(m) != 9 { - t.Error("VisitAll misses some flags") - for k, v := range m { - t.Log(k, *v) - } - } - m = make(map[string]*Flag) - Visit(visitor) - if len(m) != 0 { - t.Errorf("Visit sees unset flags") - for k, v := range m { - t.Log(k, *v) - } - } - // Now set all flags - Set("test_bool", "true") - Set("test_int", "1") - Set("test_int64", "1") - Set("test_uint", "1") - Set("test_uint64", "1") - Set("test_string", "1") - Set("test_float64", "1") - Set("test_duration", "1s") - Set("test_optional_int", "1") - desired = "1" - Visit(visitor) - if len(m) != 9 { - t.Error("Visit fails after set") - for k, v := range m { - t.Log(k, *v) - } - } - // Now test they're visited in sort order. 
- var flagNames []string - Visit(func(f *Flag) { flagNames = append(flagNames, f.Name) }) - if !sort.StringsAreSorted(flagNames) { - t.Errorf("flag names not sorted: %v", flagNames) - } -} - -func TestUsage(t *testing.T) { - called := false - ResetForTesting(func() { called = true }) - if GetCommandLine().Parse([]string{"--x"}) == nil { - t.Error("parse did not fail for unknown flag") - } - if called { - t.Error("did call Usage while using ContinueOnError") - } -} - -func TestAddFlagSet(t *testing.T) { - oldSet := NewFlagSet("old", ContinueOnError) - newSet := NewFlagSet("new", ContinueOnError) - - oldSet.String("flag1", "flag1", "flag1") - oldSet.String("flag2", "flag2", "flag2") - - newSet.String("flag2", "flag2", "flag2") - newSet.String("flag3", "flag3", "flag3") - - oldSet.AddFlagSet(newSet) - - if len(oldSet.formal) != 3 { - t.Errorf("Unexpected result adding a FlagSet to a FlagSet %v", oldSet) - } -} - -func TestAnnotation(t *testing.T) { - f := NewFlagSet("shorthand", ContinueOnError) - - if err := f.SetAnnotation("missing-flag", "key", nil); err == nil { - t.Errorf("Expected error setting annotation on non-existent flag") - } - - f.StringP("stringa", "a", "", "string value") - if err := f.SetAnnotation("stringa", "key", nil); err != nil { - t.Errorf("Unexpected error setting new nil annotation: %v", err) - } - if annotation := f.Lookup("stringa").Annotations["key"]; annotation != nil { - t.Errorf("Unexpected annotation: %v", annotation) - } - - f.StringP("stringb", "b", "", "string2 value") - if err := f.SetAnnotation("stringb", "key", []string{"value1"}); err != nil { - t.Errorf("Unexpected error setting new annotation: %v", err) - } - if annotation := f.Lookup("stringb").Annotations["key"]; !reflect.DeepEqual(annotation, []string{"value1"}) { - t.Errorf("Unexpected annotation: %v", annotation) - } - - if err := f.SetAnnotation("stringb", "key", []string{"value2"}); err != nil { - t.Errorf("Unexpected error updating annotation: %v", err) - } - if annotation := f.Lookup("stringb").Annotations["key"]; !reflect.DeepEqual(annotation, []string{"value2"}) { - t.Errorf("Unexpected annotation: %v", annotation) - } -} - -func testParse(f *FlagSet, t *testing.T) { - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - boolFlag := f.Bool("bool", false, "bool value") - bool2Flag := f.Bool("bool2", false, "bool2 value") - bool3Flag := f.Bool("bool3", false, "bool3 value") - intFlag := f.Int("int", 0, "int value") - int8Flag := f.Int8("int8", 0, "int value") - int16Flag := f.Int16("int16", 0, "int value") - int32Flag := f.Int32("int32", 0, "int value") - int64Flag := f.Int64("int64", 0, "int64 value") - uintFlag := f.Uint("uint", 0, "uint value") - uint8Flag := f.Uint8("uint8", 0, "uint value") - uint16Flag := f.Uint16("uint16", 0, "uint value") - uint32Flag := f.Uint32("uint32", 0, "uint value") - uint64Flag := f.Uint64("uint64", 0, "uint64 value") - stringFlag := f.String("string", "0", "string value") - float32Flag := f.Float32("float32", 0, "float32 value") - float64Flag := f.Float64("float64", 0, "float64 value") - ipFlag := f.IP("ip", net.ParseIP("127.0.0.1"), "ip value") - maskFlag := f.IPMask("mask", ParseIPv4Mask("0.0.0.0"), "mask value") - durationFlag := f.Duration("duration", 5*time.Second, "time.Duration value") - optionalIntNoValueFlag := f.Int("optional-int-no-value", 0, "int value") - f.Lookup("optional-int-no-value").NoOptDefVal = "9" - optionalIntWithValueFlag := f.Int("optional-int-with-value", 0, "int value") - f.Lookup("optional-int-no-value").NoOptDefVal = "9" 
- extra := "one-extra-argument" - args := []string{ - "--bool", - "--bool2=true", - "--bool3=false", - "--int=22", - "--int8=-8", - "--int16=-16", - "--int32=-32", - "--int64=0x23", - "--uint", "24", - "--uint8=8", - "--uint16=16", - "--uint32=32", - "--uint64=25", - "--string=hello", - "--float32=-172e12", - "--float64=2718e28", - "--ip=10.11.12.13", - "--mask=255.255.255.0", - "--duration=2m", - "--optional-int-no-value", - "--optional-int-with-value=42", - extra, - } - if err := f.Parse(args); err != nil { - t.Fatal(err) - } - if !f.Parsed() { - t.Error("f.Parse() = false after Parse") - } - if *boolFlag != true { - t.Error("bool flag should be true, is ", *boolFlag) - } - if v, err := f.GetBool("bool"); err != nil || v != *boolFlag { - t.Error("GetBool does not work.") - } - if *bool2Flag != true { - t.Error("bool2 flag should be true, is ", *bool2Flag) - } - if *bool3Flag != false { - t.Error("bool3 flag should be false, is ", *bool2Flag) - } - if *intFlag != 22 { - t.Error("int flag should be 22, is ", *intFlag) - } - if v, err := f.GetInt("int"); err != nil || v != *intFlag { - t.Error("GetInt does not work.") - } - if *int8Flag != -8 { - t.Error("int8 flag should be 0x23, is ", *int8Flag) - } - if *int16Flag != -16 { - t.Error("int16 flag should be -16, is ", *int16Flag) - } - if v, err := f.GetInt8("int8"); err != nil || v != *int8Flag { - t.Error("GetInt8 does not work.") - } - if v, err := f.GetInt16("int16"); err != nil || v != *int16Flag { - t.Error("GetInt16 does not work.") - } - if *int32Flag != -32 { - t.Error("int32 flag should be 0x23, is ", *int32Flag) - } - if v, err := f.GetInt32("int32"); err != nil || v != *int32Flag { - t.Error("GetInt32 does not work.") - } - if *int64Flag != 0x23 { - t.Error("int64 flag should be 0x23, is ", *int64Flag) - } - if v, err := f.GetInt64("int64"); err != nil || v != *int64Flag { - t.Error("GetInt64 does not work.") - } - if *uintFlag != 24 { - t.Error("uint flag should be 24, is ", *uintFlag) - } - if v, err := f.GetUint("uint"); err != nil || v != *uintFlag { - t.Error("GetUint does not work.") - } - if *uint8Flag != 8 { - t.Error("uint8 flag should be 8, is ", *uint8Flag) - } - if v, err := f.GetUint8("uint8"); err != nil || v != *uint8Flag { - t.Error("GetUint8 does not work.") - } - if *uint16Flag != 16 { - t.Error("uint16 flag should be 16, is ", *uint16Flag) - } - if v, err := f.GetUint16("uint16"); err != nil || v != *uint16Flag { - t.Error("GetUint16 does not work.") - } - if *uint32Flag != 32 { - t.Error("uint32 flag should be 32, is ", *uint32Flag) - } - if v, err := f.GetUint32("uint32"); err != nil || v != *uint32Flag { - t.Error("GetUint32 does not work.") - } - if *uint64Flag != 25 { - t.Error("uint64 flag should be 25, is ", *uint64Flag) - } - if v, err := f.GetUint64("uint64"); err != nil || v != *uint64Flag { - t.Error("GetUint64 does not work.") - } - if *stringFlag != "hello" { - t.Error("string flag should be `hello`, is ", *stringFlag) - } - if v, err := f.GetString("string"); err != nil || v != *stringFlag { - t.Error("GetString does not work.") - } - if *float32Flag != -172e12 { - t.Error("float32 flag should be -172e12, is ", *float32Flag) - } - if v, err := f.GetFloat32("float32"); err != nil || v != *float32Flag { - t.Errorf("GetFloat32 returned %v but float32Flag was %v", v, *float32Flag) - } - if *float64Flag != 2718e28 { - t.Error("float64 flag should be 2718e28, is ", *float64Flag) - } - if v, err := f.GetFloat64("float64"); err != nil || v != *float64Flag { - t.Errorf("GetFloat64 returned %v but 
float64Flag was %v", v, *float64Flag) - } - if !(*ipFlag).Equal(net.ParseIP("10.11.12.13")) { - t.Error("ip flag should be 10.11.12.13, is ", *ipFlag) - } - if v, err := f.GetIP("ip"); err != nil || !v.Equal(*ipFlag) { - t.Errorf("GetIP returned %v but ipFlag was %v", v, *ipFlag) - } - if (*maskFlag).String() != ParseIPv4Mask("255.255.255.0").String() { - t.Error("mask flag should be 255.255.255.0, is ", (*maskFlag).String()) - } - if v, err := f.GetIPv4Mask("mask"); err != nil || v.String() != (*maskFlag).String() { - t.Errorf("GetIP returned %v maskFlag was %v error was %v", v, *maskFlag, err) - } - if *durationFlag != 2*time.Minute { - t.Error("duration flag should be 2m, is ", *durationFlag) - } - if v, err := f.GetDuration("duration"); err != nil || v != *durationFlag { - t.Error("GetDuration does not work.") - } - if _, err := f.GetInt("duration"); err == nil { - t.Error("GetInt parsed a time.Duration?!?!") - } - if *optionalIntNoValueFlag != 9 { - t.Error("optional int flag should be the default value, is ", *optionalIntNoValueFlag) - } - if *optionalIntWithValueFlag != 42 { - t.Error("optional int flag should be 42, is ", *optionalIntWithValueFlag) - } - if len(f.Args()) != 1 { - t.Error("expected one argument, got", len(f.Args())) - } else if f.Args()[0] != extra { - t.Errorf("expected argument %q got %q", extra, f.Args()[0]) - } -} - -func testParseAll(f *FlagSet, t *testing.T) { - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - f.BoolP("boola", "a", false, "bool value") - f.BoolP("boolb", "b", false, "bool2 value") - f.BoolP("boolc", "c", false, "bool3 value") - f.BoolP("boold", "d", false, "bool4 value") - f.StringP("stringa", "s", "0", "string value") - f.StringP("stringz", "z", "0", "string value") - f.StringP("stringx", "x", "0", "string value") - f.StringP("stringy", "y", "0", "string value") - f.Lookup("stringx").NoOptDefVal = "1" - args := []string{ - "-ab", - "-cs=xx", - "--stringz=something", - "-d=true", - "-x", - "-y", - "ee", - } - want := []string{ - "boola", "true", - "boolb", "true", - "boolc", "true", - "stringa", "xx", - "stringz", "something", - "boold", "true", - "stringx", "1", - "stringy", "ee", - } - got := []string{} - store := func(flag *Flag, value string) error { - got = append(got, flag.Name) - if len(value) > 0 { - got = append(got, value) - } - return nil - } - if err := f.ParseAll(args, store); err != nil { - t.Errorf("expected no error, got %s", err) - } - if !f.Parsed() { - t.Errorf("f.Parse() = false after Parse") - } - if !reflect.DeepEqual(got, want) { - t.Errorf("f.ParseAll() fail to restore the args") - t.Errorf("Got: %v", got) - t.Errorf("Want: %v", want) - } -} - -func testParseWithUnknownFlags(f *FlagSet, t *testing.T) { - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - f.ParseErrorsWhitelist.UnknownFlags = true - - f.BoolP("boola", "a", false, "bool value") - f.BoolP("boolb", "b", false, "bool2 value") - f.BoolP("boolc", "c", false, "bool3 value") - f.BoolP("boold", "d", false, "bool4 value") - f.BoolP("boole", "e", false, "bool4 value") - f.StringP("stringa", "s", "0", "string value") - f.StringP("stringz", "z", "0", "string value") - f.StringP("stringx", "x", "0", "string value") - f.StringP("stringy", "y", "0", "string value") - f.StringP("stringo", "o", "0", "string value") - f.Lookup("stringx").NoOptDefVal = "1" - args := []string{ - "-ab", - "-cs=xx", - "--stringz=something", - "--unknown1", - "unknown1Value", - "-d=true", - "-x", - "--unknown2=unknown2Value", - "-u=unknown3Value", - "-p", - 
"unknown4Value", - "-q", //another unknown with bool value - "-y", - "ee", - "--unknown7=unknown7value", - "--stringo=ovalue", - "--unknown8=unknown8value", - "--boole", - "--unknown6", - } - want := []string{ - "boola", "true", - "boolb", "true", - "boolc", "true", - "stringa", "xx", - "stringz", "something", - "boold", "true", - "stringx", "1", - "stringy", "ee", - "stringo", "ovalue", - "boole", "true", - } - got := []string{} - store := func(flag *Flag, value string) error { - got = append(got, flag.Name) - if len(value) > 0 { - got = append(got, value) - } - return nil - } - if err := f.ParseAll(args, store); err != nil { - t.Errorf("expected no error, got %s", err) - } - if !f.Parsed() { - t.Errorf("f.Parse() = false after Parse") - } - if !reflect.DeepEqual(got, want) { - t.Errorf("f.ParseAll() fail to restore the args") - t.Errorf("Got: %v", got) - t.Errorf("Want: %v", want) - } -} - -func TestShorthand(t *testing.T) { - f := NewFlagSet("shorthand", ContinueOnError) - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - boolaFlag := f.BoolP("boola", "a", false, "bool value") - boolbFlag := f.BoolP("boolb", "b", false, "bool2 value") - boolcFlag := f.BoolP("boolc", "c", false, "bool3 value") - booldFlag := f.BoolP("boold", "d", false, "bool4 value") - stringaFlag := f.StringP("stringa", "s", "0", "string value") - stringzFlag := f.StringP("stringz", "z", "0", "string value") - extra := "interspersed-argument" - notaflag := "--i-look-like-a-flag" - args := []string{ - "-ab", - extra, - "-cs", - "hello", - "-z=something", - "-d=true", - "--", - notaflag, - } - f.SetOutput(ioutil.Discard) - if err := f.Parse(args); err != nil { - t.Error("expected no error, got ", err) - } - if !f.Parsed() { - t.Error("f.Parse() = false after Parse") - } - if *boolaFlag != true { - t.Error("boola flag should be true, is ", *boolaFlag) - } - if *boolbFlag != true { - t.Error("boolb flag should be true, is ", *boolbFlag) - } - if *boolcFlag != true { - t.Error("boolc flag should be true, is ", *boolcFlag) - } - if *booldFlag != true { - t.Error("boold flag should be true, is ", *booldFlag) - } - if *stringaFlag != "hello" { - t.Error("stringa flag should be `hello`, is ", *stringaFlag) - } - if *stringzFlag != "something" { - t.Error("stringz flag should be `something`, is ", *stringzFlag) - } - if len(f.Args()) != 2 { - t.Error("expected one argument, got", len(f.Args())) - } else if f.Args()[0] != extra { - t.Errorf("expected argument %q got %q", extra, f.Args()[0]) - } else if f.Args()[1] != notaflag { - t.Errorf("expected argument %q got %q", notaflag, f.Args()[1]) - } - if f.ArgsLenAtDash() != 1 { - t.Errorf("expected argsLenAtDash %d got %d", f.ArgsLenAtDash(), 1) - } -} - -func TestShorthandLookup(t *testing.T) { - f := NewFlagSet("shorthand", ContinueOnError) - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - f.BoolP("boola", "a", false, "bool value") - f.BoolP("boolb", "b", false, "bool2 value") - args := []string{ - "-ab", - } - f.SetOutput(ioutil.Discard) - if err := f.Parse(args); err != nil { - t.Error("expected no error, got ", err) - } - if !f.Parsed() { - t.Error("f.Parse() = false after Parse") - } - flag := f.ShorthandLookup("a") - if flag == nil { - t.Errorf("f.ShorthandLookup(\"a\") returned nil") - } - if flag.Name != "boola" { - t.Errorf("f.ShorthandLookup(\"a\") found %q instead of \"boola\"", flag.Name) - } - flag = f.ShorthandLookup("") - if flag != nil { - t.Errorf("f.ShorthandLookup(\"\") did not return nil") - } - defer func() { - recover() - }() - 
flag = f.ShorthandLookup("ab") - // should NEVER get here. lookup should panic. defer'd func should recover it. - t.Errorf("f.ShorthandLookup(\"ab\") did not panic") -} - -func TestParse(t *testing.T) { - ResetForTesting(func() { t.Error("bad parse") }) - testParse(GetCommandLine(), t) -} - -func TestParseAll(t *testing.T) { - ResetForTesting(func() { t.Error("bad parse") }) - testParseAll(GetCommandLine(), t) -} - -func TestIgnoreUnknownFlags(t *testing.T) { - ResetForTesting(func() { t.Error("bad parse") }) - testParseWithUnknownFlags(GetCommandLine(), t) -} - -func TestFlagSetParse(t *testing.T) { - testParse(NewFlagSet("test", ContinueOnError), t) -} - -func TestChangedHelper(t *testing.T) { - f := NewFlagSet("changedtest", ContinueOnError) - f.Bool("changed", false, "changed bool") - f.Bool("settrue", true, "true to true") - f.Bool("setfalse", false, "false to false") - f.Bool("unchanged", false, "unchanged bool") - - args := []string{"--changed", "--settrue", "--setfalse=false"} - if err := f.Parse(args); err != nil { - t.Error("f.Parse() = false after Parse") - } - if !f.Changed("changed") { - t.Errorf("--changed wasn't changed!") - } - if !f.Changed("settrue") { - t.Errorf("--settrue wasn't changed!") - } - if !f.Changed("setfalse") { - t.Errorf("--setfalse wasn't changed!") - } - if f.Changed("unchanged") { - t.Errorf("--unchanged was changed!") - } - if f.Changed("invalid") { - t.Errorf("--invalid was changed!") - } - if f.ArgsLenAtDash() != -1 { - t.Errorf("Expected argsLenAtDash: %d but got %d", -1, f.ArgsLenAtDash()) - } -} - -func replaceSeparators(name string, from []string, to string) string { - result := name - for _, sep := range from { - result = strings.Replace(result, sep, to, -1) - } - // Type convert to indicate normalization has been done. - return result -} - -func wordSepNormalizeFunc(f *FlagSet, name string) NormalizedName { - seps := []string{"-", "_"} - name = replaceSeparators(name, seps, ".") - normalizeFlagNameInvocations++ - - return NormalizedName(name) -} - -func testWordSepNormalizedNames(args []string, t *testing.T) { - f := NewFlagSet("normalized", ContinueOnError) - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - withDashFlag := f.Bool("with-dash-flag", false, "bool value") - // Set this after some flags have been added and before others. 
- f.SetNormalizeFunc(wordSepNormalizeFunc) - withUnderFlag := f.Bool("with_under_flag", false, "bool value") - withBothFlag := f.Bool("with-both_flag", false, "bool value") - if err := f.Parse(args); err != nil { - t.Fatal(err) - } - if !f.Parsed() { - t.Error("f.Parse() = false after Parse") - } - if *withDashFlag != true { - t.Error("withDashFlag flag should be true, is ", *withDashFlag) - } - if *withUnderFlag != true { - t.Error("withUnderFlag flag should be true, is ", *withUnderFlag) - } - if *withBothFlag != true { - t.Error("withBothFlag flag should be true, is ", *withBothFlag) - } -} - -func TestWordSepNormalizedNames(t *testing.T) { - args := []string{ - "--with-dash-flag", - "--with-under-flag", - "--with-both-flag", - } - testWordSepNormalizedNames(args, t) - - args = []string{ - "--with_dash_flag", - "--with_under_flag", - "--with_both_flag", - } - testWordSepNormalizedNames(args, t) - - args = []string{ - "--with-dash_flag", - "--with-under_flag", - "--with-both_flag", - } - testWordSepNormalizedNames(args, t) -} - -func aliasAndWordSepFlagNames(f *FlagSet, name string) NormalizedName { - seps := []string{"-", "_"} - - oldName := replaceSeparators("old-valid_flag", seps, ".") - newName := replaceSeparators("valid-flag", seps, ".") - - name = replaceSeparators(name, seps, ".") - switch name { - case oldName: - name = newName - } - - return NormalizedName(name) -} - -func TestCustomNormalizedNames(t *testing.T) { - f := NewFlagSet("normalized", ContinueOnError) - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - - validFlag := f.Bool("valid-flag", false, "bool value") - f.SetNormalizeFunc(aliasAndWordSepFlagNames) - someOtherFlag := f.Bool("some-other-flag", false, "bool value") - - args := []string{"--old_valid_flag", "--some-other_flag"} - if err := f.Parse(args); err != nil { - t.Fatal(err) - } - - if *validFlag != true { - t.Errorf("validFlag is %v even though we set the alias --old_valid_falg", *validFlag) - } - if *someOtherFlag != true { - t.Error("someOtherFlag should be true, is ", *someOtherFlag) - } -} - -// Every flag we add, the name (displayed also in usage) should normalized -func TestNormalizationFuncShouldChangeFlagName(t *testing.T) { - // Test normalization after addition - f := NewFlagSet("normalized", ContinueOnError) - - f.Bool("valid_flag", false, "bool value") - if f.Lookup("valid_flag").Name != "valid_flag" { - t.Error("The new flag should have the name 'valid_flag' instead of ", f.Lookup("valid_flag").Name) - } - - f.SetNormalizeFunc(wordSepNormalizeFunc) - if f.Lookup("valid_flag").Name != "valid.flag" { - t.Error("The new flag should have the name 'valid.flag' instead of ", f.Lookup("valid_flag").Name) - } - - // Test normalization before addition - f = NewFlagSet("normalized", ContinueOnError) - f.SetNormalizeFunc(wordSepNormalizeFunc) - - f.Bool("valid_flag", false, "bool value") - if f.Lookup("valid_flag").Name != "valid.flag" { - t.Error("The new flag should have the name 'valid.flag' instead of ", f.Lookup("valid_flag").Name) - } -} - -// Related to https://github.com/spf13/cobra/issues/521. 
-func TestNormalizationSharedFlags(t *testing.T) { - f := NewFlagSet("set f", ContinueOnError) - g := NewFlagSet("set g", ContinueOnError) - nfunc := wordSepNormalizeFunc - testName := "valid_flag" - normName := nfunc(nil, testName) - if testName == string(normName) { - t.Error("TestNormalizationSharedFlags meaningless: the original and normalized flag names are identical:", testName) - } - - f.Bool(testName, false, "bool value") - g.AddFlagSet(f) - - f.SetNormalizeFunc(nfunc) - g.SetNormalizeFunc(nfunc) - - if len(f.formal) != 1 { - t.Error("Normalizing flags should not result in duplications in the flag set:", f.formal) - } - if f.orderedFormal[0].Name != string(normName) { - t.Error("Flag name not normalized") - } - for k := range f.formal { - if k != "valid.flag" { - t.Errorf("The key in the flag map should have been normalized: wanted \"%s\", got \"%s\" instead", normName, k) - } - } - - if !reflect.DeepEqual(f.formal, g.formal) || !reflect.DeepEqual(f.orderedFormal, g.orderedFormal) { - t.Error("Two flag sets sharing the same flags should stay consistent after being normalized. Original set:", f.formal, "Duplicate set:", g.formal) - } -} - -func TestNormalizationSetFlags(t *testing.T) { - f := NewFlagSet("normalized", ContinueOnError) - nfunc := wordSepNormalizeFunc - testName := "valid_flag" - normName := nfunc(nil, testName) - if testName == string(normName) { - t.Error("TestNormalizationSetFlags meaningless: the original and normalized flag names are identical:", testName) - } - - f.Bool(testName, false, "bool value") - f.Set(testName, "true") - f.SetNormalizeFunc(nfunc) - - if len(f.formal) != 1 { - t.Error("Normalizing flags should not result in duplications in the flag set:", f.formal) - } - if f.orderedFormal[0].Name != string(normName) { - t.Error("Flag name not normalized") - } - for k := range f.formal { - if k != "valid.flag" { - t.Errorf("The key in the flag map should have been normalized: wanted \"%s\", got \"%s\" instead", normName, k) - } - } - - if !reflect.DeepEqual(f.formal, f.actual) { - t.Error("The map of set flags should get normalized. Formal:", f.formal, "Actual:", f.actual) - } -} - -// Declare a user-defined flag type. -type flagVar []string - -func (f *flagVar) String() string { - return fmt.Sprint([]string(*f)) -} - -func (f *flagVar) Set(value string) error { - *f = append(*f, value) - return nil -} - -func (f *flagVar) Type() string { - return "flagVar" -} - -func TestUserDefined(t *testing.T) { - var flags FlagSet - flags.Init("test", ContinueOnError) - var v flagVar - flags.VarP(&v, "v", "v", "usage") - if err := flags.Parse([]string{"--v=1", "-v2", "-v", "3"}); err != nil { - t.Error(err) - } - if len(v) != 3 { - t.Fatal("expected 3 args; got ", len(v)) - } - expect := "[1 2 3]" - if v.String() != expect { - t.Errorf("expected value %q got %q", expect, v.String()) - } -} - -func TestSetOutput(t *testing.T) { - var flags FlagSet - var buf bytes.Buffer - flags.SetOutput(&buf) - flags.Init("test", ContinueOnError) - flags.Parse([]string{"--unknown"}) - if out := buf.String(); !strings.Contains(out, "--unknown") { - t.Logf("expected output mentioning unknown; got %q", out) - } -} - -// This tests that one can reset the flags. This still works but not well, and is -// superseded by FlagSet. 
-func TestChangingArgs(t *testing.T) { - ResetForTesting(func() { t.Fatal("bad parse") }) - oldArgs := os.Args - defer func() { os.Args = oldArgs }() - os.Args = []string{"cmd", "--before", "subcmd"} - before := Bool("before", false, "") - if err := GetCommandLine().Parse(os.Args[1:]); err != nil { - t.Fatal(err) - } - cmd := Arg(0) - os.Args = []string{"subcmd", "--after", "args"} - after := Bool("after", false, "") - Parse() - args := Args() - - if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" { - t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args) - } -} - -// Test that -help invokes the usage message and returns ErrHelp. -func TestHelp(t *testing.T) { - var helpCalled = false - fs := NewFlagSet("help test", ContinueOnError) - fs.Usage = func() { helpCalled = true } - var flag bool - fs.BoolVar(&flag, "flag", false, "regular flag") - // Regular flag invocation should work - err := fs.Parse([]string{"--flag=true"}) - if err != nil { - t.Fatal("expected no error; got ", err) - } - if !flag { - t.Error("flag was not set by --flag") - } - if helpCalled { - t.Error("help called for regular flag") - helpCalled = false // reset for next test - } - // Help flag should work as expected. - err = fs.Parse([]string{"--help"}) - if err == nil { - t.Fatal("error expected") - } - if err != ErrHelp { - t.Fatal("expected ErrHelp; got ", err) - } - if !helpCalled { - t.Fatal("help was not called") - } - // If we define a help flag, that should override. - var help bool - fs.BoolVar(&help, "help", false, "help flag") - helpCalled = false - err = fs.Parse([]string{"--help"}) - if err != nil { - t.Fatal("expected no error for defined --help; got ", err) - } - if helpCalled { - t.Fatal("help was called; should not have been for defined help flag") - } -} - -func TestNoInterspersed(t *testing.T) { - f := NewFlagSet("test", ContinueOnError) - f.SetInterspersed(false) - f.Bool("true", true, "always true") - f.Bool("false", false, "always false") - err := f.Parse([]string{"--true", "break", "--false"}) - if err != nil { - t.Fatal("expected no error; got ", err) - } - args := f.Args() - if len(args) != 2 || args[0] != "break" || args[1] != "--false" { - t.Fatal("expected interspersed options/non-options to fail") - } -} - -func TestTermination(t *testing.T) { - f := NewFlagSet("termination", ContinueOnError) - boolFlag := f.BoolP("bool", "l", false, "bool value") - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - arg1 := "ls" - arg2 := "-l" - args := []string{ - "--", - arg1, - arg2, - } - f.SetOutput(ioutil.Discard) - if err := f.Parse(args); err != nil { - t.Fatal("expected no error; got ", err) - } - if !f.Parsed() { - t.Error("f.Parse() = false after Parse") - } - if *boolFlag { - t.Error("expected boolFlag=false, got true") - } - if len(f.Args()) != 2 { - t.Errorf("expected 2 arguments, got %d: %v", len(f.Args()), f.Args()) - } - if f.Args()[0] != arg1 { - t.Errorf("expected argument %q got %q", arg1, f.Args()[0]) - } - if f.Args()[1] != arg2 { - t.Errorf("expected argument %q got %q", arg2, f.Args()[1]) - } - if f.ArgsLenAtDash() != 0 { - t.Errorf("expected argsLenAtDash %d got %d", 0, f.ArgsLenAtDash()) - } -} - -func getDeprecatedFlagSet() *FlagSet { - f := NewFlagSet("bob", ContinueOnError) - f.Bool("badflag", true, "always true") - f.MarkDeprecated("badflag", "use --good-flag instead") - return f -} -func TestDeprecatedFlagInDocs(t *testing.T) { - f := getDeprecatedFlagSet() - - out := new(bytes.Buffer) - 
f.SetOutput(out) - f.PrintDefaults() - - if strings.Contains(out.String(), "badflag") { - t.Errorf("found deprecated flag in usage!") - } -} - -func TestUnHiddenDeprecatedFlagInDocs(t *testing.T) { - f := getDeprecatedFlagSet() - flg := f.Lookup("badflag") - if flg == nil { - t.Fatalf("Unable to lookup 'bob' in TestUnHiddenDeprecatedFlagInDocs") - } - flg.Hidden = false - - out := new(bytes.Buffer) - f.SetOutput(out) - f.PrintDefaults() - - defaults := out.String() - if !strings.Contains(defaults, "badflag") { - t.Errorf("Did not find deprecated flag in usage!") - } - if !strings.Contains(defaults, "use --good-flag instead") { - t.Errorf("Did not find 'use --good-flag instead' in defaults") - } -} - -func TestDeprecatedFlagShorthandInDocs(t *testing.T) { - f := NewFlagSet("bob", ContinueOnError) - name := "noshorthandflag" - f.BoolP(name, "n", true, "always true") - f.MarkShorthandDeprecated("noshorthandflag", fmt.Sprintf("use --%s instead", name)) - - out := new(bytes.Buffer) - f.SetOutput(out) - f.PrintDefaults() - - if strings.Contains(out.String(), "-n,") { - t.Errorf("found deprecated flag shorthand in usage!") - } -} - -func parseReturnStderr(t *testing.T, f *FlagSet, args []string) (string, error) { - oldStderr := os.Stderr - r, w, _ := os.Pipe() - os.Stderr = w - - err := f.Parse(args) - - outC := make(chan string) - // copy the output in a separate goroutine so printing can't block indefinitely - go func() { - var buf bytes.Buffer - io.Copy(&buf, r) - outC <- buf.String() - }() - - w.Close() - os.Stderr = oldStderr - out := <-outC - - return out, err -} - -func TestDeprecatedFlagUsage(t *testing.T) { - f := NewFlagSet("bob", ContinueOnError) - f.Bool("badflag", true, "always true") - usageMsg := "use --good-flag instead" - f.MarkDeprecated("badflag", usageMsg) - - args := []string{"--badflag"} - out, err := parseReturnStderr(t, f, args) - if err != nil { - t.Fatal("expected no error; got ", err) - } - - if !strings.Contains(out, usageMsg) { - t.Errorf("usageMsg not printed when using a deprecated flag!") - } -} - -func TestDeprecatedFlagShorthandUsage(t *testing.T) { - f := NewFlagSet("bob", ContinueOnError) - name := "noshorthandflag" - f.BoolP(name, "n", true, "always true") - usageMsg := fmt.Sprintf("use --%s instead", name) - f.MarkShorthandDeprecated(name, usageMsg) - - args := []string{"-n"} - out, err := parseReturnStderr(t, f, args) - if err != nil { - t.Fatal("expected no error; got ", err) - } - - if !strings.Contains(out, usageMsg) { - t.Errorf("usageMsg not printed when using a deprecated flag!") - } -} - -func TestDeprecatedFlagUsageNormalized(t *testing.T) { - f := NewFlagSet("bob", ContinueOnError) - f.Bool("bad-double_flag", true, "always true") - f.SetNormalizeFunc(wordSepNormalizeFunc) - usageMsg := "use --good-flag instead" - f.MarkDeprecated("bad_double-flag", usageMsg) - - args := []string{"--bad_double_flag"} - out, err := parseReturnStderr(t, f, args) - if err != nil { - t.Fatal("expected no error; got ", err) - } - - if !strings.Contains(out, usageMsg) { - t.Errorf("usageMsg not printed when using a deprecated flag!") - } -} - -// Name normalization function should be called only once on flag addition -func TestMultipleNormalizeFlagNameInvocations(t *testing.T) { - normalizeFlagNameInvocations = 0 - - f := NewFlagSet("normalized", ContinueOnError) - f.SetNormalizeFunc(wordSepNormalizeFunc) - f.Bool("with_under_flag", false, "bool value") - - if normalizeFlagNameInvocations != 1 { - t.Fatal("Expected normalizeFlagNameInvocations to be 1; got ", 
normalizeFlagNameInvocations) - } -} - -// -func TestHiddenFlagInUsage(t *testing.T) { - f := NewFlagSet("bob", ContinueOnError) - f.Bool("secretFlag", true, "shhh") - f.MarkHidden("secretFlag") - - out := new(bytes.Buffer) - f.SetOutput(out) - f.PrintDefaults() - - if strings.Contains(out.String(), "secretFlag") { - t.Errorf("found hidden flag in usage!") - } -} - -// -func TestHiddenFlagUsage(t *testing.T) { - f := NewFlagSet("bob", ContinueOnError) - f.Bool("secretFlag", true, "shhh") - f.MarkHidden("secretFlag") - - args := []string{"--secretFlag"} - out, err := parseReturnStderr(t, f, args) - if err != nil { - t.Fatal("expected no error; got ", err) - } - - if strings.Contains(out, "shhh") { - t.Errorf("usage message printed when using a hidden flag!") - } -} - -const defaultOutput = ` --A for bootstrapping, allow 'any' type - --Alongflagname disable bounds checking - -C, --CCC a boolean defaulting to true (default true) - --D path set relative path for local imports - -E, --EEE num[=1234] a num with NoOptDefVal (default 4321) - --F number a non-zero number (default 2.7) - --G float a float that defaults to zero - --IP ip IP address with no default - --IPMask ipMask Netmask address with no default - --IPNet ipNet IP network with no default - --Ints ints int slice with zero default - --N int a non-zero int (default 27) - --ND1 string[="bar"] a string with NoOptDefVal (default "foo") - --ND2 num[=4321] a num with NoOptDefVal (default 1234) - --StringArray stringArray string array with zero default - --StringSlice strings string slice with zero default - --Z int an int that defaults to zero - --custom custom custom Value implementation - --customP custom a VarP with default (default 10) - --maxT timeout set timeout for dial - -v, --verbose count verbosity -` - -// Custom value that satisfies the Value interface. 
-type customValue int - -func (cv *customValue) String() string { return fmt.Sprintf("%v", *cv) } - -func (cv *customValue) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *cv = customValue(v) - return err -} - -func (cv *customValue) Type() string { return "custom" } - -func TestPrintDefaults(t *testing.T) { - fs := NewFlagSet("print defaults test", ContinueOnError) - var buf bytes.Buffer - fs.SetOutput(&buf) - fs.Bool("A", false, "for bootstrapping, allow 'any' type") - fs.Bool("Alongflagname", false, "disable bounds checking") - fs.BoolP("CCC", "C", true, "a boolean defaulting to true") - fs.String("D", "", "set relative `path` for local imports") - fs.Float64("F", 2.7, "a non-zero `number`") - fs.Float64("G", 0, "a float that defaults to zero") - fs.Int("N", 27, "a non-zero int") - fs.IntSlice("Ints", []int{}, "int slice with zero default") - fs.IP("IP", nil, "IP address with no default") - fs.IPMask("IPMask", nil, "Netmask address with no default") - fs.IPNet("IPNet", net.IPNet{}, "IP network with no default") - fs.Int("Z", 0, "an int that defaults to zero") - fs.Duration("maxT", 0, "set `timeout` for dial") - fs.String("ND1", "foo", "a string with NoOptDefVal") - fs.Lookup("ND1").NoOptDefVal = "bar" - fs.Int("ND2", 1234, "a `num` with NoOptDefVal") - fs.Lookup("ND2").NoOptDefVal = "4321" - fs.IntP("EEE", "E", 4321, "a `num` with NoOptDefVal") - fs.ShorthandLookup("E").NoOptDefVal = "1234" - fs.StringSlice("StringSlice", []string{}, "string slice with zero default") - fs.StringArray("StringArray", []string{}, "string array with zero default") - fs.CountP("verbose", "v", "verbosity") - - var cv customValue - fs.Var(&cv, "custom", "custom Value implementation") - - cv2 := customValue(10) - fs.VarP(&cv2, "customP", "", "a VarP with default") - - fs.PrintDefaults() - got := buf.String() - if got != defaultOutput { - fmt.Println("\n" + got) - fmt.Println("\n" + defaultOutput) - t.Errorf("got %q want %q\n", got, defaultOutput) - } -} - -func TestVisitAllFlagOrder(t *testing.T) { - fs := NewFlagSet("TestVisitAllFlagOrder", ContinueOnError) - fs.SortFlags = false - // https://github.com/spf13/pflag/issues/120 - fs.SetNormalizeFunc(func(f *FlagSet, name string) NormalizedName { - return NormalizedName(name) - }) - - names := []string{"C", "B", "A", "D"} - for _, name := range names { - fs.Bool(name, false, "") - } - - i := 0 - fs.VisitAll(func(f *Flag) { - if names[i] != f.Name { - t.Errorf("Incorrect order. Expected %v, got %v", names[i], f.Name) - } - i++ - }) -} - -func TestVisitFlagOrder(t *testing.T) { - fs := NewFlagSet("TestVisitFlagOrder", ContinueOnError) - fs.SortFlags = false - names := []string{"C", "B", "A", "D"} - for _, name := range names { - fs.Bool(name, false, "") - fs.Set(name, "true") - } - - i := 0 - fs.Visit(func(f *Flag) { - if names[i] != f.Name { - t.Errorf("Incorrect order. Expected %v, got %v", names[i], f.Name) - } - i++ - }) -} diff --git a/vendor/github.com/spf13/pflag/golangflag_test.go b/vendor/github.com/spf13/pflag/golangflag_test.go deleted file mode 100644 index 5bd831bf..00000000 --- a/vendor/github.com/spf13/pflag/golangflag_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package pflag - -import ( - goflag "flag" - "testing" -) - -func TestGoflags(t *testing.T) { - goflag.String("stringFlag", "stringFlag", "stringFlag") - goflag.Bool("boolFlag", false, "boolFlag") - - f := NewFlagSet("test", ContinueOnError) - - f.AddGoFlagSet(goflag.CommandLine) - err := f.Parse([]string{"--stringFlag=bob", "--boolFlag"}) - if err != nil { - t.Fatal("expected no error; get", err) - } - - getString, err := f.GetString("stringFlag") - if err != nil { - t.Fatal("expected no error; get", err) - } - if getString != "bob" { - t.Fatalf("expected getString=bob but got getString=%s", getString) - } - - getBool, err := f.GetBool("boolFlag") - if err != nil { - t.Fatal("expected no error; get", err) - } - if getBool != true { - t.Fatalf("expected getBool=true but got getBool=%v", getBool) - } - if !f.Parsed() { - t.Fatal("f.Parsed() return false after f.Parse() called") - } - - // in fact it is useless. because `go test` called flag.Parse() - if !goflag.CommandLine.Parsed() { - t.Fatal("goflag.CommandLine.Parsed() return false after f.Parse() called") - } -} diff --git a/vendor/github.com/spf13/pflag/int_slice_test.go b/vendor/github.com/spf13/pflag/int_slice_test.go deleted file mode 100644 index 745aecb9..00000000 --- a/vendor/github.com/spf13/pflag/int_slice_test.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pflag - -import ( - "fmt" - "strconv" - "strings" - "testing" -) - -func setUpISFlagSet(isp *[]int) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - f.IntSliceVar(isp, "is", []int{}, "Command separated list!") - return f -} - -func setUpISFlagSetWithDefault(isp *[]int) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - f.IntSliceVar(isp, "is", []int{0, 1}, "Command separated list!") - return f -} - -func TestEmptyIS(t *testing.T) { - var is []int - f := setUpISFlagSet(&is) - err := f.Parse([]string{}) - if err != nil { - t.Fatal("expected no error; got", err) - } - - getIS, err := f.GetIntSlice("is") - if err != nil { - t.Fatal("got an error from GetIntSlice():", err) - } - if len(getIS) != 0 { - t.Fatalf("got is %v with len=%d but expected length=0", getIS, len(getIS)) - } -} - -func TestIS(t *testing.T) { - var is []int - f := setUpISFlagSet(&is) - - vals := []string{"1", "2", "4", "3"} - arg := fmt.Sprintf("--is=%s", strings.Join(vals, ",")) - err := f.Parse([]string{arg}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range is { - d, err := strconv.Atoi(vals[i]) - if err != nil { - t.Fatalf("got error: %v", err) - } - if d != v { - t.Fatalf("expected is[%d] to be %s but got: %d", i, vals[i], v) - } - } - getIS, err := f.GetIntSlice("is") - if err != nil { - t.Fatalf("got error: %v", err) - } - for i, v := range getIS { - d, err := strconv.Atoi(vals[i]) - if err != nil { - t.Fatalf("got error: %v", err) - } - if d != v { - t.Fatalf("expected is[%d] to be %s but got: %d from GetIntSlice", i, vals[i], v) - } - } -} - -func TestISDefault(t *testing.T) { - var is []int - f := setUpISFlagSetWithDefault(&is) - - vals := []string{"0", "1"} - - err := f.Parse([]string{}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range is { - d, err := strconv.Atoi(vals[i]) - if err != nil { - t.Fatalf("got error: %v", err) - } - if d != v { - t.Fatalf("expected is[%d] to be %d but got: %d", i, d, v) - } - } - - getIS, err := f.GetIntSlice("is") - if err != nil 
{ - t.Fatal("got an error from GetIntSlice():", err) - } - for i, v := range getIS { - d, err := strconv.Atoi(vals[i]) - if err != nil { - t.Fatal("got an error from GetIntSlice():", err) - } - if d != v { - t.Fatalf("expected is[%d] to be %d from GetIntSlice but got: %d", i, d, v) - } - } -} - -func TestISWithDefault(t *testing.T) { - var is []int - f := setUpISFlagSetWithDefault(&is) - - vals := []string{"1", "2"} - arg := fmt.Sprintf("--is=%s", strings.Join(vals, ",")) - err := f.Parse([]string{arg}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range is { - d, err := strconv.Atoi(vals[i]) - if err != nil { - t.Fatalf("got error: %v", err) - } - if d != v { - t.Fatalf("expected is[%d] to be %d but got: %d", i, d, v) - } - } - - getIS, err := f.GetIntSlice("is") - if err != nil { - t.Fatal("got an error from GetIntSlice():", err) - } - for i, v := range getIS { - d, err := strconv.Atoi(vals[i]) - if err != nil { - t.Fatalf("got error: %v", err) - } - if d != v { - t.Fatalf("expected is[%d] to be %d from GetIntSlice but got: %d", i, d, v) - } - } -} - -func TestISCalledTwice(t *testing.T) { - var is []int - f := setUpISFlagSet(&is) - - in := []string{"1,2", "3"} - expected := []int{1, 2, 3} - argfmt := "--is=%s" - arg1 := fmt.Sprintf(argfmt, in[0]) - arg2 := fmt.Sprintf(argfmt, in[1]) - err := f.Parse([]string{arg1, arg2}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range is { - if expected[i] != v { - t.Fatalf("expected is[%d] to be %d but got: %d", i, expected[i], v) - } - } -} diff --git a/vendor/github.com/spf13/pflag/ip_slice_test.go b/vendor/github.com/spf13/pflag/ip_slice_test.go deleted file mode 100644 index b0c681c5..00000000 --- a/vendor/github.com/spf13/pflag/ip_slice_test.go +++ /dev/null @@ -1,222 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strings" - "testing" -) - -func setUpIPSFlagSet(ipsp *[]net.IP) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - f.IPSliceVar(ipsp, "ips", []net.IP{}, "Command separated list!") - return f -} - -func setUpIPSFlagSetWithDefault(ipsp *[]net.IP) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - f.IPSliceVar(ipsp, "ips", - []net.IP{ - net.ParseIP("192.168.1.1"), - net.ParseIP("0:0:0:0:0:0:0:1"), - }, - "Command separated list!") - return f -} - -func TestEmptyIP(t *testing.T) { - var ips []net.IP - f := setUpIPSFlagSet(&ips) - err := f.Parse([]string{}) - if err != nil { - t.Fatal("expected no error; got", err) - } - - getIPS, err := f.GetIPSlice("ips") - if err != nil { - t.Fatal("got an error from GetIPSlice():", err) - } - if len(getIPS) != 0 { - t.Fatalf("got ips %v with len=%d but expected length=0", getIPS, len(getIPS)) - } -} - -func TestIPS(t *testing.T) { - var ips []net.IP - f := setUpIPSFlagSet(&ips) - - vals := []string{"192.168.1.1", "10.0.0.1", "0:0:0:0:0:0:0:2"} - arg := fmt.Sprintf("--ips=%s", strings.Join(vals, ",")) - err := f.Parse([]string{arg}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range ips { - if ip := net.ParseIP(vals[i]); ip == nil { - t.Fatalf("invalid string being converted to IP address: %s", vals[i]) - } else if !ip.Equal(v) { - t.Fatalf("expected ips[%d] to be %s but got: %s from GetIPSlice", i, vals[i], v) - } - } -} - -func TestIPSDefault(t *testing.T) { - var ips []net.IP - f := setUpIPSFlagSetWithDefault(&ips) - - vals := []string{"192.168.1.1", "0:0:0:0:0:0:0:1"} - err := f.Parse([]string{}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range ips { - if 
ip := net.ParseIP(vals[i]); ip == nil { - t.Fatalf("invalid string being converted to IP address: %s", vals[i]) - } else if !ip.Equal(v) { - t.Fatalf("expected ips[%d] to be %s but got: %s", i, vals[i], v) - } - } - - getIPS, err := f.GetIPSlice("ips") - if err != nil { - t.Fatal("got an error from GetIPSlice") - } - for i, v := range getIPS { - if ip := net.ParseIP(vals[i]); ip == nil { - t.Fatalf("invalid string being converted to IP address: %s", vals[i]) - } else if !ip.Equal(v) { - t.Fatalf("expected ips[%d] to be %s but got: %s", i, vals[i], v) - } - } -} - -func TestIPSWithDefault(t *testing.T) { - var ips []net.IP - f := setUpIPSFlagSetWithDefault(&ips) - - vals := []string{"192.168.1.1", "0:0:0:0:0:0:0:1"} - arg := fmt.Sprintf("--ips=%s", strings.Join(vals, ",")) - err := f.Parse([]string{arg}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range ips { - if ip := net.ParseIP(vals[i]); ip == nil { - t.Fatalf("invalid string being converted to IP address: %s", vals[i]) - } else if !ip.Equal(v) { - t.Fatalf("expected ips[%d] to be %s but got: %s", i, vals[i], v) - } - } - - getIPS, err := f.GetIPSlice("ips") - if err != nil { - t.Fatal("got an error from GetIPSlice") - } - for i, v := range getIPS { - if ip := net.ParseIP(vals[i]); ip == nil { - t.Fatalf("invalid string being converted to IP address: %s", vals[i]) - } else if !ip.Equal(v) { - t.Fatalf("expected ips[%d] to be %s but got: %s", i, vals[i], v) - } - } -} - -func TestIPSCalledTwice(t *testing.T) { - var ips []net.IP - f := setUpIPSFlagSet(&ips) - - in := []string{"192.168.1.2,0:0:0:0:0:0:0:1", "10.0.0.1"} - expected := []net.IP{net.ParseIP("192.168.1.2"), net.ParseIP("0:0:0:0:0:0:0:1"), net.ParseIP("10.0.0.1")} - argfmt := "ips=%s" - arg1 := fmt.Sprintf(argfmt, in[0]) - arg2 := fmt.Sprintf(argfmt, in[1]) - err := f.Parse([]string{arg1, arg2}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range ips { - if !expected[i].Equal(v) { - t.Fatalf("expected ips[%d] to be %s but got: %s", i, expected[i], v) - } - } -} - -func TestIPSBadQuoting(t *testing.T) { - - tests := []struct { - Want []net.IP - FlagArg []string - }{ - { - Want: []net.IP{ - net.ParseIP("a4ab:61d:f03e:5d7d:fad7:d4c2:a1a5:568"), - net.ParseIP("203.107.49.208"), - net.ParseIP("14.57.204.90"), - }, - FlagArg: []string{ - "a4ab:61d:f03e:5d7d:fad7:d4c2:a1a5:568", - "203.107.49.208", - "14.57.204.90", - }, - }, - { - Want: []net.IP{ - net.ParseIP("204.228.73.195"), - net.ParseIP("86.141.15.94"), - }, - FlagArg: []string{ - "204.228.73.195", - "86.141.15.94", - }, - }, - { - Want: []net.IP{ - net.ParseIP("c70c:db36:3001:890f:c6ea:3f9b:7a39:cc3f"), - net.ParseIP("4d17:1d6e:e699:bd7a:88c5:5e7e:ac6a:4472"), - }, - FlagArg: []string{ - "c70c:db36:3001:890f:c6ea:3f9b:7a39:cc3f", - "4d17:1d6e:e699:bd7a:88c5:5e7e:ac6a:4472", - }, - }, - { - Want: []net.IP{ - net.ParseIP("5170:f971:cfac:7be3:512a:af37:952c:bc33"), - net.ParseIP("93.21.145.140"), - net.ParseIP("2cac:61d3:c5ff:6caf:73e0:1b1a:c336:c1ca"), - }, - FlagArg: []string{ - " 5170:f971:cfac:7be3:512a:af37:952c:bc33 , 93.21.145.140 ", - "2cac:61d3:c5ff:6caf:73e0:1b1a:c336:c1ca", - }, - }, - { - Want: []net.IP{ - net.ParseIP("2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"), - net.ParseIP("2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"), - net.ParseIP("2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"), - net.ParseIP("2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"), - }, - FlagArg: []string{ - `"2e5e:66b2:6441:848:5b74:76ea:574c:3a7b, 
2e5e:66b2:6441:848:5b74:76ea:574c:3a7b,2e5e:66b2:6441:848:5b74:76ea:574c:3a7b "`, - " 2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"}, - }, - } - - for i, test := range tests { - - var ips []net.IP - f := setUpIPSFlagSet(&ips) - - if err := f.Parse([]string{fmt.Sprintf("--ips=%s", strings.Join(test.FlagArg, ","))}); err != nil { - t.Fatalf("flag parsing failed with error: %s\nparsing:\t%#v\nwant:\t\t%s", - err, test.FlagArg, test.Want[i]) - } - - for j, b := range ips { - if !b.Equal(test.Want[j]) { - t.Fatalf("bad value parsed for test %d on net.IP %d:\nwant:\t%s\ngot:\t%s", i, j, test.Want[j], b) - } - } - } -} diff --git a/vendor/github.com/spf13/pflag/ip_test.go b/vendor/github.com/spf13/pflag/ip_test.go deleted file mode 100644 index 1fec50e4..00000000 --- a/vendor/github.com/spf13/pflag/ip_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "os" - "testing" -) - -func setUpIP(ip *net.IP) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - f.IPVar(ip, "address", net.ParseIP("0.0.0.0"), "IP Address") - return f -} - -func TestIP(t *testing.T) { - testCases := []struct { - input string - success bool - expected string - }{ - {"0.0.0.0", true, "0.0.0.0"}, - {" 0.0.0.0 ", true, "0.0.0.0"}, - {"1.2.3.4", true, "1.2.3.4"}, - {"127.0.0.1", true, "127.0.0.1"}, - {"255.255.255.255", true, "255.255.255.255"}, - {"", false, ""}, - {"0", false, ""}, - {"localhost", false, ""}, - {"0.0.0", false, ""}, - {"0.0.0.", false, ""}, - {"0.0.0.0.", false, ""}, - {"0.0.0.256", false, ""}, - {"0 . 0 . 0 . 0", false, ""}, - } - - devnull, _ := os.Open(os.DevNull) - os.Stderr = devnull - for i := range testCases { - var addr net.IP - f := setUpIP(&addr) - - tc := &testCases[i] - - arg := fmt.Sprintf("--address=%s", tc.input) - err := f.Parse([]string{arg}) - if err != nil && tc.success == true { - t.Errorf("expected success, got %q", err) - continue - } else if err == nil && tc.success == false { - t.Errorf("expected failure") - continue - } else if tc.success { - ip, err := f.GetIP("address") - if err != nil { - t.Errorf("Got error trying to fetch the IP flag: %v", err) - } - if ip.String() != tc.expected { - t.Errorf("expected %q, got %q", tc.expected, ip.String()) - } - } - } -} diff --git a/vendor/github.com/spf13/pflag/ipnet_test.go b/vendor/github.com/spf13/pflag/ipnet_test.go deleted file mode 100644 index 335b6fa1..00000000 --- a/vendor/github.com/spf13/pflag/ipnet_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "os" - "testing" -) - -func setUpIPNet(ip *net.IPNet) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - _, def, _ := net.ParseCIDR("0.0.0.0/0") - f.IPNetVar(ip, "address", *def, "IP Address") - return f -} - -func TestIPNet(t *testing.T) { - testCases := []struct { - input string - success bool - expected string - }{ - {"0.0.0.0/0", true, "0.0.0.0/0"}, - {" 0.0.0.0/0 ", true, "0.0.0.0/0"}, - {"1.2.3.4/8", true, "1.0.0.0/8"}, - {"127.0.0.1/16", true, "127.0.0.0/16"}, - {"255.255.255.255/19", true, "255.255.224.0/19"}, - {"255.255.255.255/32", true, "255.255.255.255/32"}, - {"", false, ""}, - {"/0", false, ""}, - {"0", false, ""}, - {"0/0", false, ""}, - {"localhost/0", false, ""}, - {"0.0.0/4", false, ""}, - {"0.0.0./8", false, ""}, - {"0.0.0.0./12", false, ""}, - {"0.0.0.256/16", false, ""}, - {"0.0.0.0 /20", false, ""}, - {"0.0.0.0/ 24", false, ""}, - {"0 . 0 . 0 . 
0 / 28", false, ""}, - {"0.0.0.0/33", false, ""}, - } - - devnull, _ := os.Open(os.DevNull) - os.Stderr = devnull - for i := range testCases { - var addr net.IPNet - f := setUpIPNet(&addr) - - tc := &testCases[i] - - arg := fmt.Sprintf("--address=%s", tc.input) - err := f.Parse([]string{arg}) - if err != nil && tc.success == true { - t.Errorf("expected success, got %q", err) - continue - } else if err == nil && tc.success == false { - t.Errorf("expected failure") - continue - } else if tc.success { - ip, err := f.GetIPNet("address") - if err != nil { - t.Errorf("Got error trying to fetch the IP flag: %v", err) - } - if ip.String() != tc.expected { - t.Errorf("expected %q, got %q", tc.expected, ip.String()) - } - } - } -} diff --git a/vendor/github.com/spf13/pflag/printusage_test.go b/vendor/github.com/spf13/pflag/printusage_test.go deleted file mode 100644 index df982aab..00000000 --- a/vendor/github.com/spf13/pflag/printusage_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package pflag - -import ( - "bytes" - "io" - "testing" -) - -const expectedOutput = ` --long-form Some description - --long-form2 Some description - with multiline - -s, --long-name Some description - -t, --long-name2 Some description with - multiline -` - -func setUpPFlagSet(buf io.Writer) *FlagSet { - f := NewFlagSet("test", ExitOnError) - f.Bool("long-form", false, "Some description") - f.Bool("long-form2", false, "Some description\n with multiline") - f.BoolP("long-name", "s", false, "Some description") - f.BoolP("long-name2", "t", false, "Some description with\n multiline") - f.SetOutput(buf) - return f -} - -func TestPrintUsage(t *testing.T) { - buf := bytes.Buffer{} - f := setUpPFlagSet(&buf) - f.PrintDefaults() - res := buf.String() - if res != expectedOutput { - t.Errorf("Expected \n%s \nActual \n%s", expectedOutput, res) - } -} - -func setUpPFlagSet2(buf io.Writer) *FlagSet { - f := NewFlagSet("test", ExitOnError) - f.Bool("long-form", false, "Some description") - f.Bool("long-form2", false, "Some description\n with multiline") - f.BoolP("long-name", "s", false, "Some description") - f.BoolP("long-name2", "t", false, "Some description with\n multiline") - f.StringP("some-very-long-arg", "l", "test", "Some very long description having break the limit") - f.StringP("other-very-long-arg", "o", "long-default-value", "Some very long description having break the limit") - f.String("some-very-long-arg2", "very long default value", "Some very long description\nwith line break\nmultiple") - f.SetOutput(buf) - return f -} - -const expectedOutput2 = ` --long-form Some description - --long-form2 Some description - with multiline - -s, --long-name Some description - -t, --long-name2 Some description with - multiline - -o, --other-very-long-arg string Some very long description having - break the limit (default - "long-default-value") - -l, --some-very-long-arg string Some very long description having - break the limit (default "test") - --some-very-long-arg2 string Some very long description - with line break - multiple (default "very long default - value") -` - -func TestPrintUsage_2(t *testing.T) { - buf := bytes.Buffer{} - f := setUpPFlagSet2(&buf) - res := f.FlagUsagesWrapped(80) - if res != expectedOutput2 { - t.Errorf("Expected \n%q \nActual \n%q", expectedOutput2, res) - } -} diff --git a/vendor/github.com/spf13/pflag/string_array_test.go b/vendor/github.com/spf13/pflag/string_array_test.go deleted file mode 100644 index 1ceac8c6..00000000 --- a/vendor/github.com/spf13/pflag/string_array_test.go +++ /dev/null @@ -1,233 +0,0 
@@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pflag - -import ( - "fmt" - "testing" -) - -func setUpSAFlagSet(sap *[]string) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - f.StringArrayVar(sap, "sa", []string{}, "Command separated list!") - return f -} - -func setUpSAFlagSetWithDefault(sap *[]string) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - f.StringArrayVar(sap, "sa", []string{"default", "values"}, "Command separated list!") - return f -} - -func TestEmptySA(t *testing.T) { - var sa []string - f := setUpSAFlagSet(&sa) - err := f.Parse([]string{}) - if err != nil { - t.Fatal("expected no error; got", err) - } - - getSA, err := f.GetStringArray("sa") - if err != nil { - t.Fatal("got an error from GetStringArray():", err) - } - if len(getSA) != 0 { - t.Fatalf("got sa %v with len=%d but expected length=0", getSA, len(getSA)) - } -} - -func TestEmptySAValue(t *testing.T) { - var sa []string - f := setUpSAFlagSet(&sa) - err := f.Parse([]string{"--sa="}) - if err != nil { - t.Fatal("expected no error; got", err) - } - - getSA, err := f.GetStringArray("sa") - if err != nil { - t.Fatal("got an error from GetStringArray():", err) - } - if len(getSA) != 0 { - t.Fatalf("got sa %v with len=%d but expected length=0", getSA, len(getSA)) - } -} - -func TestSADefault(t *testing.T) { - var sa []string - f := setUpSAFlagSetWithDefault(&sa) - - vals := []string{"default", "values"} - - err := f.Parse([]string{}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range sa { - if vals[i] != v { - t.Fatalf("expected sa[%d] to be %s but got: %s", i, vals[i], v) - } - } - - getSA, err := f.GetStringArray("sa") - if err != nil { - t.Fatal("got an error from GetStringArray():", err) - } - for i, v := range getSA { - if vals[i] != v { - t.Fatalf("expected sa[%d] to be %s from GetStringArray but got: %s", i, vals[i], v) - } - } -} - -func TestSAWithDefault(t *testing.T) { - var sa []string - f := setUpSAFlagSetWithDefault(&sa) - - val := "one" - arg := fmt.Sprintf("--sa=%s", val) - err := f.Parse([]string{arg}) - if err != nil { - t.Fatal("expected no error; got", err) - } - - if len(sa) != 1 { - t.Fatalf("expected number of values to be %d but %d", 1, len(sa)) - } - - if sa[0] != val { - t.Fatalf("expected value to be %s but got: %s", sa[0], val) - } - - getSA, err := f.GetStringArray("sa") - if err != nil { - t.Fatal("got an error from GetStringArray():", err) - } - - if len(getSA) != 1 { - t.Fatalf("expected number of values to be %d but %d", 1, len(getSA)) - } - - if getSA[0] != val { - t.Fatalf("expected value to be %s but got: %s", getSA[0], val) - } -} - -func TestSACalledTwice(t *testing.T) { - var sa []string - f := setUpSAFlagSet(&sa) - - in := []string{"one", "two"} - expected := []string{"one", "two"} - argfmt := "--sa=%s" - arg1 := fmt.Sprintf(argfmt, in[0]) - arg2 := fmt.Sprintf(argfmt, in[1]) - err := f.Parse([]string{arg1, arg2}) - if err != nil { - t.Fatal("expected no error; got", err) - } - - if len(expected) != len(sa) { - t.Fatalf("expected number of sa to be %d but got: %d", len(expected), len(sa)) - } - for i, v := range sa { - if expected[i] != v { - t.Fatalf("expected sa[%d] to be %s but got: %s", i, expected[i], v) - } - } - - values, err := f.GetStringArray("sa") - if err != nil { - t.Fatal("expected no error; got", err) - } - - if len(expected) != len(values) { - t.Fatalf("expected number of values to be %d but 
got: %d", len(expected), len(sa)) - } - for i, v := range values { - if expected[i] != v { - t.Fatalf("expected got sa[%d] to be %s but got: %s", i, expected[i], v) - } - } -} - -func TestSAWithSpecialChar(t *testing.T) { - var sa []string - f := setUpSAFlagSet(&sa) - - in := []string{"one,two", `"three"`, `"four,five",six`, "seven eight"} - expected := []string{"one,two", `"three"`, `"four,five",six`, "seven eight"} - argfmt := "--sa=%s" - arg1 := fmt.Sprintf(argfmt, in[0]) - arg2 := fmt.Sprintf(argfmt, in[1]) - arg3 := fmt.Sprintf(argfmt, in[2]) - arg4 := fmt.Sprintf(argfmt, in[3]) - err := f.Parse([]string{arg1, arg2, arg3, arg4}) - if err != nil { - t.Fatal("expected no error; got", err) - } - - if len(expected) != len(sa) { - t.Fatalf("expected number of sa to be %d but got: %d", len(expected), len(sa)) - } - for i, v := range sa { - if expected[i] != v { - t.Fatalf("expected sa[%d] to be %s but got: %s", i, expected[i], v) - } - } - - values, err := f.GetStringArray("sa") - if err != nil { - t.Fatal("expected no error; got", err) - } - - if len(expected) != len(values) { - t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(values)) - } - for i, v := range values { - if expected[i] != v { - t.Fatalf("expected got sa[%d] to be %s but got: %s", i, expected[i], v) - } - } -} - -func TestSAWithSquareBrackets(t *testing.T) { - var sa []string - f := setUpSAFlagSet(&sa) - - in := []string{"][]-[", "[a-z]", "[a-z]+"} - expected := []string{"][]-[", "[a-z]", "[a-z]+"} - argfmt := "--sa=%s" - arg1 := fmt.Sprintf(argfmt, in[0]) - arg2 := fmt.Sprintf(argfmt, in[1]) - arg3 := fmt.Sprintf(argfmt, in[2]) - err := f.Parse([]string{arg1, arg2, arg3}) - if err != nil { - t.Fatal("expected no error; got", err) - } - - if len(expected) != len(sa) { - t.Fatalf("expected number of sa to be %d but got: %d", len(expected), len(sa)) - } - for i, v := range sa { - if expected[i] != v { - t.Fatalf("expected sa[%d] to be %s but got: %s", i, expected[i], v) - } - } - - values, err := f.GetStringArray("sa") - if err != nil { - t.Fatal("expected no error; got", err) - } - - if len(expected) != len(values) { - t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(values)) - } - for i, v := range values { - if expected[i] != v { - t.Fatalf("expected got sa[%d] to be %s but got: %s", i, expected[i], v) - } - } -} diff --git a/vendor/github.com/spf13/pflag/string_slice_test.go b/vendor/github.com/spf13/pflag/string_slice_test.go deleted file mode 100644 index c41f3bd6..00000000 --- a/vendor/github.com/spf13/pflag/string_slice_test.go +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package pflag - -import ( - "fmt" - "strings" - "testing" -) - -func setUpSSFlagSet(ssp *[]string) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - f.StringSliceVar(ssp, "ss", []string{}, "Command separated list!") - return f -} - -func setUpSSFlagSetWithDefault(ssp *[]string) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - f.StringSliceVar(ssp, "ss", []string{"default", "values"}, "Command separated list!") - return f -} - -func TestEmptySS(t *testing.T) { - var ss []string - f := setUpSSFlagSet(&ss) - err := f.Parse([]string{}) - if err != nil { - t.Fatal("expected no error; got", err) - } - - getSS, err := f.GetStringSlice("ss") - if err != nil { - t.Fatal("got an error from GetStringSlice():", err) - } - if len(getSS) != 0 { - t.Fatalf("got ss %v with len=%d but expected length=0", getSS, len(getSS)) - } -} - -func TestEmptySSValue(t *testing.T) { - var ss []string - f := setUpSSFlagSet(&ss) - err := f.Parse([]string{"--ss="}) - if err != nil { - t.Fatal("expected no error; got", err) - } - - getSS, err := f.GetStringSlice("ss") - if err != nil { - t.Fatal("got an error from GetStringSlice():", err) - } - if len(getSS) != 0 { - t.Fatalf("got ss %v with len=%d but expected length=0", getSS, len(getSS)) - } -} - -func TestSS(t *testing.T) { - var ss []string - f := setUpSSFlagSet(&ss) - - vals := []string{"one", "two", "4", "3"} - arg := fmt.Sprintf("--ss=%s", strings.Join(vals, ",")) - err := f.Parse([]string{arg}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range ss { - if vals[i] != v { - t.Fatalf("expected ss[%d] to be %s but got: %s", i, vals[i], v) - } - } - - getSS, err := f.GetStringSlice("ss") - if err != nil { - t.Fatal("got an error from GetStringSlice():", err) - } - for i, v := range getSS { - if vals[i] != v { - t.Fatalf("expected ss[%d] to be %s from GetStringSlice but got: %s", i, vals[i], v) - } - } -} - -func TestSSDefault(t *testing.T) { - var ss []string - f := setUpSSFlagSetWithDefault(&ss) - - vals := []string{"default", "values"} - - err := f.Parse([]string{}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range ss { - if vals[i] != v { - t.Fatalf("expected ss[%d] to be %s but got: %s", i, vals[i], v) - } - } - - getSS, err := f.GetStringSlice("ss") - if err != nil { - t.Fatal("got an error from GetStringSlice():", err) - } - for i, v := range getSS { - if vals[i] != v { - t.Fatalf("expected ss[%d] to be %s from GetStringSlice but got: %s", i, vals[i], v) - } - } -} - -func TestSSWithDefault(t *testing.T) { - var ss []string - f := setUpSSFlagSetWithDefault(&ss) - - vals := []string{"one", "two", "4", "3"} - arg := fmt.Sprintf("--ss=%s", strings.Join(vals, ",")) - err := f.Parse([]string{arg}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range ss { - if vals[i] != v { - t.Fatalf("expected ss[%d] to be %s but got: %s", i, vals[i], v) - } - } - - getSS, err := f.GetStringSlice("ss") - if err != nil { - t.Fatal("got an error from GetStringSlice():", err) - } - for i, v := range getSS { - if vals[i] != v { - t.Fatalf("expected ss[%d] to be %s from GetStringSlice but got: %s", i, vals[i], v) - } - } -} - -func TestSSCalledTwice(t *testing.T) { - var ss []string - f := setUpSSFlagSet(&ss) - - in := []string{"one,two", "three"} - expected := []string{"one", "two", "three"} - argfmt := "--ss=%s" - arg1 := fmt.Sprintf(argfmt, in[0]) - arg2 := fmt.Sprintf(argfmt, in[1]) - err := f.Parse([]string{arg1, arg2}) - if err != nil { - t.Fatal("expected no error; got", 
err) - } - - if len(expected) != len(ss) { - t.Fatalf("expected number of ss to be %d but got: %d", len(expected), len(ss)) - } - for i, v := range ss { - if expected[i] != v { - t.Fatalf("expected ss[%d] to be %s but got: %s", i, expected[i], v) - } - } - - values, err := f.GetStringSlice("ss") - if err != nil { - t.Fatal("expected no error; got", err) - } - - if len(expected) != len(values) { - t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(ss)) - } - for i, v := range values { - if expected[i] != v { - t.Fatalf("expected got ss[%d] to be %s but got: %s", i, expected[i], v) - } - } -} - -func TestSSWithComma(t *testing.T) { - var ss []string - f := setUpSSFlagSet(&ss) - - in := []string{`"one,two"`, `"three"`, `"four,five",six`} - expected := []string{"one,two", "three", "four,five", "six"} - argfmt := "--ss=%s" - arg1 := fmt.Sprintf(argfmt, in[0]) - arg2 := fmt.Sprintf(argfmt, in[1]) - arg3 := fmt.Sprintf(argfmt, in[2]) - err := f.Parse([]string{arg1, arg2, arg3}) - if err != nil { - t.Fatal("expected no error; got", err) - } - - if len(expected) != len(ss) { - t.Fatalf("expected number of ss to be %d but got: %d", len(expected), len(ss)) - } - for i, v := range ss { - if expected[i] != v { - t.Fatalf("expected ss[%d] to be %s but got: %s", i, expected[i], v) - } - } - - values, err := f.GetStringSlice("ss") - if err != nil { - t.Fatal("expected no error; got", err) - } - - if len(expected) != len(values) { - t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(values)) - } - for i, v := range values { - if expected[i] != v { - t.Fatalf("expected got ss[%d] to be %s but got: %s", i, expected[i], v) - } - } -} - -func TestSSWithSquareBrackets(t *testing.T) { - var ss []string - f := setUpSSFlagSet(&ss) - - in := []string{`"[a-z]"`, `"[a-z]+"`} - expected := []string{"[a-z]", "[a-z]+"} - argfmt := "--ss=%s" - arg1 := fmt.Sprintf(argfmt, in[0]) - arg2 := fmt.Sprintf(argfmt, in[1]) - err := f.Parse([]string{arg1, arg2}) - if err != nil { - t.Fatal("expected no error; got", err) - } - - if len(expected) != len(ss) { - t.Fatalf("expected number of ss to be %d but got: %d", len(expected), len(ss)) - } - for i, v := range ss { - if expected[i] != v { - t.Fatalf("expected ss[%d] to be %s but got: %s", i, expected[i], v) - } - } - - values, err := f.GetStringSlice("ss") - if err != nil { - t.Fatal("expected no error; got", err) - } - - if len(expected) != len(values) { - t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(values)) - } - for i, v := range values { - if expected[i] != v { - t.Fatalf("expected got ss[%d] to be %s but got: %s", i, expected[i], v) - } - } -} diff --git a/vendor/github.com/spf13/pflag/uint_slice_test.go b/vendor/github.com/spf13/pflag/uint_slice_test.go deleted file mode 100644 index db1a19dc..00000000 --- a/vendor/github.com/spf13/pflag/uint_slice_test.go +++ /dev/null @@ -1,161 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" - "testing" -) - -func setUpUISFlagSet(uisp *[]uint) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - f.UintSliceVar(uisp, "uis", []uint{}, "Command separated list!") - return f -} - -func setUpUISFlagSetWithDefault(uisp *[]uint) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - f.UintSliceVar(uisp, "uis", []uint{0, 1}, "Command separated list!") - return f -} - -func TestEmptyUIS(t *testing.T) { - var uis []uint - f := setUpUISFlagSet(&uis) - err := f.Parse([]string{}) - if err != nil { - t.Fatal("expected no error; got", err) - } - 
- getUIS, err := f.GetUintSlice("uis") - if err != nil { - t.Fatal("got an error from GetUintSlice():", err) - } - if len(getUIS) != 0 { - t.Fatalf("got is %v with len=%d but expected length=0", getUIS, len(getUIS)) - } -} - -func TestUIS(t *testing.T) { - var uis []uint - f := setUpUISFlagSet(&uis) - - vals := []string{"1", "2", "4", "3"} - arg := fmt.Sprintf("--uis=%s", strings.Join(vals, ",")) - err := f.Parse([]string{arg}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range uis { - u, err := strconv.ParseUint(vals[i], 10, 0) - if err != nil { - t.Fatalf("got error: %v", err) - } - if uint(u) != v { - t.Fatalf("expected uis[%d] to be %s but got %d", i, vals[i], v) - } - } - getUIS, err := f.GetUintSlice("uis") - if err != nil { - t.Fatalf("got error: %v", err) - } - for i, v := range getUIS { - u, err := strconv.ParseUint(vals[i], 10, 0) - if err != nil { - t.Fatalf("got error: %v", err) - } - if uint(u) != v { - t.Fatalf("expected uis[%d] to be %s but got: %d from GetUintSlice", i, vals[i], v) - } - } -} - -func TestUISDefault(t *testing.T) { - var uis []uint - f := setUpUISFlagSetWithDefault(&uis) - - vals := []string{"0", "1"} - - err := f.Parse([]string{}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range uis { - u, err := strconv.ParseUint(vals[i], 10, 0) - if err != nil { - t.Fatalf("got error: %v", err) - } - if uint(u) != v { - t.Fatalf("expect uis[%d] to be %d but got: %d", i, u, v) - } - } - - getUIS, err := f.GetUintSlice("uis") - if err != nil { - t.Fatal("got an error from GetUintSlice():", err) - } - for i, v := range getUIS { - u, err := strconv.ParseUint(vals[i], 10, 0) - if err != nil { - t.Fatal("got an error from GetIntSlice():", err) - } - if uint(u) != v { - t.Fatalf("expected uis[%d] to be %d from GetUintSlice but got: %d", i, u, v) - } - } -} - -func TestUISWithDefault(t *testing.T) { - var uis []uint - f := setUpUISFlagSetWithDefault(&uis) - - vals := []string{"1", "2"} - arg := fmt.Sprintf("--uis=%s", strings.Join(vals, ",")) - err := f.Parse([]string{arg}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range uis { - u, err := strconv.ParseUint(vals[i], 10, 0) - if err != nil { - t.Fatalf("got error: %v", err) - } - if uint(u) != v { - t.Fatalf("expected uis[%d] to be %d from GetUintSlice but got: %d", i, u, v) - } - } - - getUIS, err := f.GetUintSlice("uis") - if err != nil { - t.Fatal("got an error from GetUintSlice():", err) - } - for i, v := range getUIS { - u, err := strconv.ParseUint(vals[i], 10, 0) - if err != nil { - t.Fatalf("got error: %v", err) - } - if uint(u) != v { - t.Fatalf("expected uis[%d] to be %d from GetUintSlice but got: %d", i, u, v) - } - } -} - -func TestUISCalledTwice(t *testing.T) { - var uis []uint - f := setUpUISFlagSet(&uis) - - in := []string{"1,2", "3"} - expected := []int{1, 2, 3} - argfmt := "--uis=%s" - arg1 := fmt.Sprintf(argfmt, in[0]) - arg2 := fmt.Sprintf(argfmt, in[1]) - err := f.Parse([]string{arg1, arg2}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range uis { - if uint(expected[i]) != v { - t.Fatalf("expected uis[%d] to be %d but got: %d", i, expected[i], v) - } - } -} diff --git a/vendor/golang.org/x/crypto/.gitattributes b/vendor/golang.org/x/crypto/.gitattributes deleted file mode 100644 index d2f212e5..00000000 --- a/vendor/golang.org/x/crypto/.gitattributes +++ /dev/null @@ -1,10 +0,0 @@ -# Treat all files in this repo as binary, with no git magic updating -# line endings. 
Windows users contributing to Go will need to use a -# modern version of git and editors capable of LF line endings. -# -# We'll prevent accidental CRLF line endings from entering the repo -# via the git-review gofmt checks. -# -# See golang.org/issue/9281 - -* -text diff --git a/vendor/golang.org/x/crypto/.gitignore b/vendor/golang.org/x/crypto/.gitignore deleted file mode 100644 index 8339fd61..00000000 --- a/vendor/golang.org/x/crypto/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# Add no patterns to .hgignore except for files generated by the build. -last-change diff --git a/vendor/golang.org/x/crypto/CONTRIBUTING.md b/vendor/golang.org/x/crypto/CONTRIBUTING.md deleted file mode 100644 index d0485e88..00000000 --- a/vendor/golang.org/x/crypto/CONTRIBUTING.md +++ /dev/null @@ -1,26 +0,0 @@ -# Contributing to Go - -Go is an open source project. - -It is the work of hundreds of contributors. We appreciate your help! - -## Filing issues - -When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. -The gophers there will answer or ask you to file an issue if you've tripped over a bug. - -## Contributing code - -Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) -before sending patches. - -Unless otherwise noted, the Go source files are distributed under -the BSD-style license found in the LICENSE file. diff --git a/vendor/golang.org/x/crypto/README.md b/vendor/golang.org/x/crypto/README.md deleted file mode 100644 index c9d6fecd..00000000 --- a/vendor/golang.org/x/crypto/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# Go Cryptography - -This repository holds supplementary Go cryptography libraries. - -## Download/Install - -The easiest way to install is to run `go get -u golang.org/x/crypto/...`. You -can also manually git clone the repository to `$GOPATH/src/golang.org/x/crypto`. - -## Report Issues / Send Patches - -This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. - -The main issue tracker for the crypto repository is located at -https://github.com/golang/go/issues. Prefix your issue with "x/crypto:" in the -subject line, so it is easy to find. - -Note that contributions to the cryptography package receive additional scrutiny -due to their sensitive nature. Patches may take longer than normal to receive -feedback. 
diff --git a/vendor/golang.org/x/crypto/codereview.cfg b/vendor/golang.org/x/crypto/codereview.cfg deleted file mode 100644 index 3f8b14b6..00000000 --- a/vendor/golang.org/x/crypto/codereview.cfg +++ /dev/null @@ -1 +0,0 @@ -issuerepo: golang/go diff --git a/vendor/golang.org/x/crypto/ssh/BUILD.bazel b/vendor/golang.org/x/crypto/ssh/BUILD.bazel deleted file mode 100644 index f07f7c22..00000000 --- a/vendor/golang.org/x/crypto/ssh/BUILD.bazel +++ /dev/null @@ -1,74 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "buffer.go", - "certs.go", - "channel.go", - "cipher.go", - "client.go", - "client_auth.go", - "common.go", - "connection.go", - "doc.go", - "handshake.go", - "kex.go", - "keys.go", - "mac.go", - "messages.go", - "mux.go", - "server.go", - "session.go", - "streamlocal.go", - "tcpip.go", - "transport.go", - ], - importpath = "golang.org/x/crypto/ssh", - visibility = ["//visibility:public"], - deps = [ - "//vendor/golang.org/x/crypto/curve25519:go_default_library", - "//vendor/golang.org/x/crypto/ed25519:go_default_library", - "//vendor/golang.org/x/crypto/internal/chacha20:go_default_library", - "//vendor/golang.org/x/crypto/poly1305:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "benchmark_test.go", - "buffer_test.go", - "certs_test.go", - "cipher_test.go", - "client_auth_test.go", - "client_test.go", - "handshake_test.go", - "kex_test.go", - "keys_test.go", - "mempipe_test.go", - "messages_test.go", - "mux_test.go", - "session_test.go", - "tcpip_test.go", - "testdata_test.go", - "transport_test.go", - ], - embed = [":go_default_library"], - importpath = "golang.org/x/crypto/ssh", - deps = [ - "//vendor/golang.org/x/crypto/ed25519:go_default_library", - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - "//vendor/golang.org/x/crypto/ssh/testdata:go_default_library", - ], -) - -go_test( - name = "go_default_xtest", - srcs = ["example_test.go"], - importpath = "golang.org/x/crypto/ssh_test", - deps = [ - ":go_default_library", - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], -) diff --git a/vendor/golang.org/x/crypto/ssh/benchmark_test.go b/vendor/golang.org/x/crypto/ssh/benchmark_test.go deleted file mode 100644 index 20c33077..00000000 --- a/vendor/golang.org/x/crypto/ssh/benchmark_test.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ssh - -import ( - "errors" - "io" - "net" - "testing" -) - -type server struct { - *ServerConn - chans <-chan NewChannel -} - -func newServer(c net.Conn, conf *ServerConfig) (*server, error) { - sconn, chans, reqs, err := NewServerConn(c, conf) - if err != nil { - return nil, err - } - go DiscardRequests(reqs) - return &server{sconn, chans}, nil -} - -func (s *server) Accept() (NewChannel, error) { - n, ok := <-s.chans - if !ok { - return nil, io.EOF - } - return n, nil -} - -func sshPipe() (Conn, *server, error) { - c1, c2, err := netPipe() - if err != nil { - return nil, nil, err - } - - clientConf := ClientConfig{ - User: "user", - HostKeyCallback: InsecureIgnoreHostKey(), - } - serverConf := ServerConfig{ - NoClientAuth: true, - } - serverConf.AddHostKey(testSigners["ecdsa"]) - done := make(chan *server, 1) - go func() { - server, err := newServer(c2, &serverConf) - if err != nil { - done <- nil - } - done <- server - }() - - client, _, reqs, err := NewClientConn(c1, "", &clientConf) - if err != nil { - return nil, nil, err - } - - server := <-done - if server == nil { - return nil, nil, errors.New("server handshake failed.") - } - go DiscardRequests(reqs) - - return client, server, nil -} - -func BenchmarkEndToEnd(b *testing.B) { - b.StopTimer() - - client, server, err := sshPipe() - if err != nil { - b.Fatalf("sshPipe: %v", err) - } - - defer client.Close() - defer server.Close() - - size := (1 << 20) - input := make([]byte, size) - output := make([]byte, size) - b.SetBytes(int64(size)) - done := make(chan int, 1) - - go func() { - newCh, err := server.Accept() - if err != nil { - b.Fatalf("Client: %v", err) - } - ch, incoming, err := newCh.Accept() - go DiscardRequests(incoming) - for i := 0; i < b.N; i++ { - if _, err := io.ReadFull(ch, output); err != nil { - b.Fatalf("ReadFull: %v", err) - } - } - ch.Close() - done <- 1 - }() - - ch, in, err := client.OpenChannel("speed", nil) - if err != nil { - b.Fatalf("OpenChannel: %v", err) - } - go DiscardRequests(in) - - b.ResetTimer() - b.StartTimer() - for i := 0; i < b.N; i++ { - if _, err := ch.Write(input); err != nil { - b.Fatalf("WriteFull: %v", err) - } - } - ch.Close() - b.StopTimer() - - <-done -} diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go deleted file mode 100644 index 1ab07d07..00000000 --- a/vendor/golang.org/x/crypto/ssh/buffer.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "io" - "sync" -) - -// buffer provides a linked list buffer for data exchange -// between producer and consumer. Theoretically the buffer is -// of unlimited capacity as it does no allocation of its own. -type buffer struct { - // protects concurrent access to head, tail and closed - *sync.Cond - - head *element // the buffer that will be read first - tail *element // the buffer that will be read last - - closed bool -} - -// An element represents a single link in a linked list. -type element struct { - buf []byte - next *element -} - -// newBuffer returns an empty buffer that is not closed. -func newBuffer() *buffer { - e := new(element) - b := &buffer{ - Cond: newCond(), - head: e, - tail: e, - } - return b -} - -// write makes buf available for Read to receive. -// buf must not be modified after the call to write. 
-func (b *buffer) write(buf []byte) { - b.Cond.L.Lock() - e := &element{buf: buf} - b.tail.next = e - b.tail = e - b.Cond.Signal() - b.Cond.L.Unlock() -} - -// eof closes the buffer. Reads from the buffer once all -// the data has been consumed will receive io.EOF. -func (b *buffer) eof() { - b.Cond.L.Lock() - b.closed = true - b.Cond.Signal() - b.Cond.L.Unlock() -} - -// Read reads data from the internal buffer in buf. Reads will block -// if no data is available, or until the buffer is closed. -func (b *buffer) Read(buf []byte) (n int, err error) { - b.Cond.L.Lock() - defer b.Cond.L.Unlock() - - for len(buf) > 0 { - // if there is data in b.head, copy it - if len(b.head.buf) > 0 { - r := copy(buf, b.head.buf) - buf, b.head.buf = buf[r:], b.head.buf[r:] - n += r - continue - } - // if there is a next buffer, make it the head - if len(b.head.buf) == 0 && b.head != b.tail { - b.head = b.head.next - continue - } - - // if at least one byte has been copied, return - if n > 0 { - break - } - - // if nothing was read, and there is nothing outstanding - // check to see if the buffer is closed. - if b.closed { - err = io.EOF - break - } - // out of buffers, wait for producer - b.Cond.Wait() - } - return -} diff --git a/vendor/golang.org/x/crypto/ssh/buffer_test.go b/vendor/golang.org/x/crypto/ssh/buffer_test.go deleted file mode 100644 index d5781cb3..00000000 --- a/vendor/golang.org/x/crypto/ssh/buffer_test.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "io" - "testing" -) - -var alphabet = []byte("abcdefghijklmnopqrstuvwxyz") - -func TestBufferReadwrite(t *testing.T) { - b := newBuffer() - b.write(alphabet[:10]) - r, _ := b.Read(make([]byte, 10)) - if r != 10 { - t.Fatalf("Expected written == read == 10, written: 10, read %d", r) - } - - b = newBuffer() - b.write(alphabet[:5]) - r, _ = b.Read(make([]byte, 10)) - if r != 5 { - t.Fatalf("Expected written == read == 5, written: 5, read %d", r) - } - - b = newBuffer() - b.write(alphabet[:10]) - r, _ = b.Read(make([]byte, 5)) - if r != 5 { - t.Fatalf("Expected written == 10, read == 5, written: 10, read %d", r) - } - - b = newBuffer() - b.write(alphabet[:5]) - b.write(alphabet[5:15]) - r, _ = b.Read(make([]byte, 10)) - r2, _ := b.Read(make([]byte, 10)) - if r != 10 || r2 != 5 || 15 != r+r2 { - t.Fatal("Expected written == read == 15") - } -} - -func TestBufferClose(t *testing.T) { - b := newBuffer() - b.write(alphabet[:10]) - b.eof() - _, err := b.Read(make([]byte, 5)) - if err != nil { - t.Fatal("expected read of 5 to not return EOF") - } - b = newBuffer() - b.write(alphabet[:10]) - b.eof() - r, err := b.Read(make([]byte, 5)) - r2, err2 := b.Read(make([]byte, 10)) - if r != 5 || r2 != 5 || err != nil || err2 != nil { - t.Fatal("expected reads of 5 and 5") - } - - b = newBuffer() - b.write(alphabet[:10]) - b.eof() - r, err = b.Read(make([]byte, 5)) - r2, err2 = b.Read(make([]byte, 10)) - r3, err3 := b.Read(make([]byte, 10)) - if r != 5 || r2 != 5 || r3 != 0 || err != nil || err2 != nil || err3 != io.EOF { - t.Fatal("expected reads of 5 and 5 and 0, with EOF") - } - - b = newBuffer() - b.write(make([]byte, 5)) - b.write(make([]byte, 10)) - b.eof() - r, err = b.Read(make([]byte, 9)) - r2, err2 = b.Read(make([]byte, 3)) - r3, err3 = b.Read(make([]byte, 3)) - r4, err4 := b.Read(make([]byte, 10)) - if err != nil || err2 != nil || err3 != nil || err4 != io.EOF { - 
t.Fatalf("Expected EOF on forth read only, err=%v, err2=%v, err3=%v, err4=%v", err, err2, err3, err4) - } - if r != 9 || r2 != 3 || r3 != 3 || r4 != 0 { - t.Fatal("Expected written == read == 15", r, r2, r3, r4) - } -} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go deleted file mode 100644 index 42106f3f..00000000 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "sort" - "time" -) - -// These constants from [PROTOCOL.certkeys] represent the algorithm names -// for certificate types supported by this package. -const ( - CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" - CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" - CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" - CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" - CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" - CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" -) - -// Certificate types distinguish between host and user -// certificates. The values can be set in the CertType field of -// Certificate. -const ( - UserCert = 1 - HostCert = 2 -) - -// Signature represents a cryptographic signature. -type Signature struct { - Format string - Blob []byte -} - -// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that -// a certificate does not expire. -const CertTimeInfinity = 1<<64 - 1 - -// An Certificate represents an OpenSSH certificate as defined in -// [PROTOCOL.certkeys]?rev=1.8. The Certificate type implements the -// PublicKey interface, so it can be unmarshaled using -// ParsePublicKey. -type Certificate struct { - Nonce []byte - Key PublicKey - Serial uint64 - CertType uint32 - KeyId string - ValidPrincipals []string - ValidAfter uint64 - ValidBefore uint64 - Permissions - Reserved []byte - SignatureKey PublicKey - Signature *Signature -} - -// genericCertData holds the key-independent part of the certificate data. -// Overall, certificates contain an nonce, public key fields and -// key-independent fields. -type genericCertData struct { - Serial uint64 - CertType uint32 - KeyId string - ValidPrincipals []byte - ValidAfter uint64 - ValidBefore uint64 - CriticalOptions []byte - Extensions []byte - Reserved []byte - SignatureKey []byte - Signature []byte -} - -func marshalStringList(namelist []string) []byte { - var to []byte - for _, name := range namelist { - s := struct{ N string }{name} - to = append(to, Marshal(&s)...) - } - return to -} - -type optionsTuple struct { - Key string - Value []byte -} - -type optionsTupleValue struct { - Value string -} - -// serialize a map of critical options or extensions -// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, -// we need two length prefixes for a non-empty string value -func marshalTuples(tups map[string]string) []byte { - keys := make([]string, 0, len(tups)) - for key := range tups { - keys = append(keys, key) - } - sort.Strings(keys) - - var ret []byte - for _, key := range keys { - s := optionsTuple{Key: key} - if value := tups[key]; len(value) > 0 { - s.Value = Marshal(&optionsTupleValue{value}) - } - ret = append(ret, Marshal(&s)...) 
- } - return ret -} - -// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, -// we need two length prefixes for a non-empty option value -func parseTuples(in []byte) (map[string]string, error) { - tups := map[string]string{} - var lastKey string - var haveLastKey bool - - for len(in) > 0 { - var key, val, extra []byte - var ok bool - - if key, in, ok = parseString(in); !ok { - return nil, errShortRead - } - keyStr := string(key) - // according to [PROTOCOL.certkeys], the names must be in - // lexical order. - if haveLastKey && keyStr <= lastKey { - return nil, fmt.Errorf("ssh: certificate options are not in lexical order") - } - lastKey, haveLastKey = keyStr, true - // the next field is a data field, which if non-empty has a string embedded - if val, in, ok = parseString(in); !ok { - return nil, errShortRead - } - if len(val) > 0 { - val, extra, ok = parseString(val) - if !ok { - return nil, errShortRead - } - if len(extra) > 0 { - return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value") - } - tups[keyStr] = string(val) - } else { - tups[keyStr] = "" - } - } - return tups, nil -} - -func parseCert(in []byte, privAlgo string) (*Certificate, error) { - nonce, rest, ok := parseString(in) - if !ok { - return nil, errShortRead - } - - key, rest, err := parsePubKey(rest, privAlgo) - if err != nil { - return nil, err - } - - var g genericCertData - if err := Unmarshal(rest, &g); err != nil { - return nil, err - } - - c := &Certificate{ - Nonce: nonce, - Key: key, - Serial: g.Serial, - CertType: g.CertType, - KeyId: g.KeyId, - ValidAfter: g.ValidAfter, - ValidBefore: g.ValidBefore, - } - - for principals := g.ValidPrincipals; len(principals) > 0; { - principal, rest, ok := parseString(principals) - if !ok { - return nil, errShortRead - } - c.ValidPrincipals = append(c.ValidPrincipals, string(principal)) - principals = rest - } - - c.CriticalOptions, err = parseTuples(g.CriticalOptions) - if err != nil { - return nil, err - } - c.Extensions, err = parseTuples(g.Extensions) - if err != nil { - return nil, err - } - c.Reserved = g.Reserved - k, err := ParsePublicKey(g.SignatureKey) - if err != nil { - return nil, err - } - - c.SignatureKey = k - c.Signature, rest, ok = parseSignatureBody(g.Signature) - if !ok || len(rest) > 0 { - return nil, errors.New("ssh: signature parse error") - } - - return c, nil -} - -type openSSHCertSigner struct { - pub *Certificate - signer Signer -} - -// NewCertSigner returns a Signer that signs with the given Certificate, whose -// private key is held by signer. It returns an error if the public key in cert -// doesn't match the key used by signer. -func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { - if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { - return nil, errors.New("ssh: signer and cert have different public key") - } - - return &openSSHCertSigner{cert, signer}, nil -} - -func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.signer.Sign(rand, data) -} - -func (s *openSSHCertSigner) PublicKey() PublicKey { - return s.pub -} - -const sourceAddressCriticalOption = "source-address" - -// CertChecker does the work of verifying a certificate. Its methods -// can be plugged into ClientConfig.HostKeyCallback and -// ServerConfig.PublicKeyCallback. For the CertChecker to work, -// minimally, the IsAuthority callback should be set. 
-type CertChecker struct { - // SupportedCriticalOptions lists the CriticalOptions that the - // server application layer understands. These are only used - // for user certificates. - SupportedCriticalOptions []string - - // IsUserAuthority should return true if the key is recognized as an - // authority for the given user certificate. This allows for - // certificates to be signed by other certificates. This must be set - // if this CertChecker will be checking user certificates. - IsUserAuthority func(auth PublicKey) bool - - // IsHostAuthority should report whether the key is recognized as - // an authority for this host. This allows for certificates to be - // signed by other keys, and for those other keys to only be valid - // signers for particular hostnames. This must be set if this - // CertChecker will be checking host certificates. - IsHostAuthority func(auth PublicKey, address string) bool - - // Clock is used for verifying time stamps. If nil, time.Now - // is used. - Clock func() time.Time - - // UserKeyFallback is called when CertChecker.Authenticate encounters a - // public key that is not a certificate. It must implement validation - // of user keys or else, if nil, all such keys are rejected. - UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // HostKeyFallback is called when CertChecker.CheckHostKey encounters a - // public key that is not a certificate. It must implement host key - // validation or else, if nil, all such keys are rejected. - HostKeyFallback HostKeyCallback - - // IsRevoked is called for each certificate so that revocation checking - // can be implemented. It should return true if the given certificate - // is revoked and false otherwise. If nil, no certificates are - // considered to have been revoked. - IsRevoked func(cert *Certificate) bool -} - -// CheckHostKey checks a host key certificate. This method can be -// plugged into ClientConfig.HostKeyCallback. -func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error { - cert, ok := key.(*Certificate) - if !ok { - if c.HostKeyFallback != nil { - return c.HostKeyFallback(addr, remote, key) - } - return errors.New("ssh: non-certificate host key") - } - if cert.CertType != HostCert { - return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType) - } - if !c.IsHostAuthority(cert.SignatureKey, addr) { - return fmt.Errorf("ssh: no authorities for hostname: %v", addr) - } - - hostname, _, err := net.SplitHostPort(addr) - if err != nil { - return err - } - - // Pass hostname only as principal for host certificates (consistent with OpenSSH) - return c.CheckCert(hostname, cert) -} - -// Authenticate checks a user certificate. Authenticate can be used as -// a value for ServerConfig.PublicKeyCallback. 
-func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) { - cert, ok := pubKey.(*Certificate) - if !ok { - if c.UserKeyFallback != nil { - return c.UserKeyFallback(conn, pubKey) - } - return nil, errors.New("ssh: normal key pairs not accepted") - } - - if cert.CertType != UserCert { - return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType) - } - if !c.IsUserAuthority(cert.SignatureKey) { - return nil, fmt.Errorf("ssh: certificate signed by unrecognized authority") - } - - if err := c.CheckCert(conn.User(), cert); err != nil { - return nil, err - } - - return &cert.Permissions, nil -} - -// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and -// the signature of the certificate. -func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { - if c.IsRevoked != nil && c.IsRevoked(cert) { - return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial) - } - - for opt := range cert.CriticalOptions { - // sourceAddressCriticalOption will be enforced by - // serverAuthenticate - if opt == sourceAddressCriticalOption { - continue - } - - found := false - for _, supp := range c.SupportedCriticalOptions { - if supp == opt { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt) - } - } - - if len(cert.ValidPrincipals) > 0 { - // By default, certs are valid for all users/hosts. - found := false - for _, p := range cert.ValidPrincipals { - if p == principal { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals) - } - } - - clock := c.Clock - if clock == nil { - clock = time.Now - } - - unixNow := clock().Unix() - if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { - return fmt.Errorf("ssh: cert is not yet valid") - } - if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { - return fmt.Errorf("ssh: cert has expired") - } - if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil { - return fmt.Errorf("ssh: certificate signature does not verify") - } - - return nil -} - -// SignCert sets c.SignatureKey to the authority's public key and stores a -// Signature, by authority, in the certificate. -func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { - c.Nonce = make([]byte, 32) - if _, err := io.ReadFull(rand, c.Nonce); err != nil { - return err - } - c.SignatureKey = authority.PublicKey() - - sig, err := authority.Sign(rand, c.bytesForSigning()) - if err != nil { - return err - } - c.Signature = sig - return nil -} - -var certAlgoNames = map[string]string{ - KeyAlgoRSA: CertAlgoRSAv01, - KeyAlgoDSA: CertAlgoDSAv01, - KeyAlgoECDSA256: CertAlgoECDSA256v01, - KeyAlgoECDSA384: CertAlgoECDSA384v01, - KeyAlgoECDSA521: CertAlgoECDSA521v01, - KeyAlgoED25519: CertAlgoED25519v01, -} - -// certToPrivAlgo returns the underlying algorithm for a certificate algorithm. -// Panics if a non-certificate algorithm is passed. -func certToPrivAlgo(algo string) string { - for privAlgo, pubAlgo := range certAlgoNames { - if pubAlgo == algo { - return privAlgo - } - } - panic("unknown cert algorithm") -} - -func (cert *Certificate) bytesForSigning() []byte { - c2 := *cert - c2.Signature = nil - out := c2.Marshal() - // Drop trailing signature length. 
- return out[:len(out)-4] -} - -// Marshal serializes c into OpenSSH's wire format. It is part of the -// PublicKey interface. -func (c *Certificate) Marshal() []byte { - generic := genericCertData{ - Serial: c.Serial, - CertType: c.CertType, - KeyId: c.KeyId, - ValidPrincipals: marshalStringList(c.ValidPrincipals), - ValidAfter: uint64(c.ValidAfter), - ValidBefore: uint64(c.ValidBefore), - CriticalOptions: marshalTuples(c.CriticalOptions), - Extensions: marshalTuples(c.Extensions), - Reserved: c.Reserved, - SignatureKey: c.SignatureKey.Marshal(), - } - if c.Signature != nil { - generic.Signature = Marshal(c.Signature) - } - genericBytes := Marshal(&generic) - keyBytes := c.Key.Marshal() - _, keyBytes, _ = parseString(keyBytes) - prefix := Marshal(&struct { - Name string - Nonce []byte - Key []byte `ssh:"rest"` - }{c.Type(), c.Nonce, keyBytes}) - - result := make([]byte, 0, len(prefix)+len(genericBytes)) - result = append(result, prefix...) - result = append(result, genericBytes...) - return result -} - -// Type returns the key name. It is part of the PublicKey interface. -func (c *Certificate) Type() string { - algo, ok := certAlgoNames[c.Key.Type()] - if !ok { - panic("unknown cert key type " + c.Key.Type()) - } - return algo -} - -// Verify verifies a signature against the certificate's public -// key. It is part of the PublicKey interface. -func (c *Certificate) Verify(data []byte, sig *Signature) error { - return c.Key.Verify(data, sig) -} - -func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { - format, in, ok := parseString(in) - if !ok { - return - } - - out = &Signature{ - Format: string(format), - } - - if out.Blob, in, ok = parseString(in); !ok { - return - } - - return out, in, ok -} - -func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) { - sigBytes, rest, ok := parseString(in) - if !ok { - return - } - - out, trailing, ok := parseSignatureBody(sigBytes) - if !ok || len(trailing) > 0 { - return nil, nil, false - } - return -} diff --git a/vendor/golang.org/x/crypto/ssh/certs_test.go b/vendor/golang.org/x/crypto/ssh/certs_test.go deleted file mode 100644 index c8e7cf58..00000000 --- a/vendor/golang.org/x/crypto/ssh/certs_test.go +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "net" - "reflect" - "testing" - "time" - - "golang.org/x/crypto/ssh/testdata" -) - -// Cert generated by ssh-keygen 6.0p1 Debian-4. 
-// % ssh-keygen -s ca-key -I test user-key -const exampleSSHCert = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgb1srW/W3ZDjYAO45xLYAwzHBDLsJ4Ux6ICFIkTjb1LEAAAADAQABAAAAYQCkoR51poH0wE8w72cqSB8Sszx+vAhzcMdCO0wqHTj7UNENHWEXGrU0E0UQekD7U+yhkhtoyjbPOVIP7hNa6aRk/ezdh/iUnCIt4Jt1v3Z1h1P+hA4QuYFMHNB+rmjPwAcAAAAAAAAAAAAAAAEAAAAEdGVzdAAAAAAAAAAAAAAAAP//////////AAAAAAAAAIIAAAAVcGVybWl0LVgxMS1mb3J3YXJkaW5nAAAAAAAAABdwZXJtaXQtYWdlbnQtZm9yd2FyZGluZwAAAAAAAAAWcGVybWl0LXBvcnQtZm9yd2FyZGluZwAAAAAAAAAKcGVybWl0LXB0eQAAAAAAAAAOcGVybWl0LXVzZXItcmMAAAAAAAAAAAAAAHcAAAAHc3NoLXJzYQAAAAMBAAEAAABhANFS2kaktpSGc+CcmEKPyw9mJC4nZKxHKTgLVZeaGbFZOvJTNzBspQHdy7Q1uKSfktxpgjZnksiu/tFF9ngyY2KFoc+U88ya95IZUycBGCUbBQ8+bhDtw/icdDGQD5WnUwAAAG8AAAAHc3NoLXJzYQAAAGC8Y9Z2LQKhIhxf52773XaWrXdxP0t3GBVo4A10vUWiYoAGepr6rQIoGGXFxT4B9Gp+nEBJjOwKDXPrAevow0T9ca8gZN+0ykbhSrXLE5Ao48rqr3zP4O1/9P7e6gp0gw8=` - -func TestParseCert(t *testing.T) { - authKeyBytes := []byte(exampleSSHCert) - - key, _, _, rest, err := ParseAuthorizedKey(authKeyBytes) - if err != nil { - t.Fatalf("ParseAuthorizedKey: %v", err) - } - if len(rest) > 0 { - t.Errorf("rest: got %q, want empty", rest) - } - - if _, ok := key.(*Certificate); !ok { - t.Fatalf("got %v (%T), want *Certificate", key, key) - } - - marshaled := MarshalAuthorizedKey(key) - // Before comparison, remove the trailing newline that - // MarshalAuthorizedKey adds. - marshaled = marshaled[:len(marshaled)-1] - if !bytes.Equal(authKeyBytes, marshaled) { - t.Errorf("marshaled certificate does not match original: got %q, want %q", marshaled, authKeyBytes) - } -} - -// Cert generated by ssh-keygen OpenSSH_6.8p1 OS X 10.10.3 -// % ssh-keygen -s ca -I testcert -O source-address=192.168.1.0/24 -O force-command=/bin/sleep user.pub -// user.pub key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDACh1rt2DXfV3hk6fszSQcQ/rueMId0kVD9U7nl8cfEnFxqOCrNT92g4laQIGl2mn8lsGZfTLg8ksHq3gkvgO3oo/0wHy4v32JeBOHTsN5AL4gfHNEhWeWb50ev47hnTsRIt9P4dxogeUo/hTu7j9+s9lLpEQXCvq6xocXQt0j8MV9qZBBXFLXVT3cWIkSqOdwt/5ZBg+1GSrc7WfCXVWgTk4a20uPMuJPxU4RQwZW6X3+O8Pqo8C3cW0OzZRFP6gUYUKUsTI5WntlS+LAxgw1mZNsozFGdbiOPRnEryE3SRldh9vjDR3tin1fGpA5P7+CEB/bqaXtG3V+F2OkqaMN -// Critical Options: -// force-command /bin/sleep -// source-address 192.168.1.0/24 -// Extensions: -// permit-X11-forwarding -// permit-agent-forwarding -// permit-port-forwarding -// permit-pty -// permit-user-rc -const exampleSSHCertWithOptions = `ssh-rsa-cert-v01@openssh.com 
AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgDyysCJY0XrO1n03EeRRoITnTPdjENFmWDs9X58PP3VUAAAADAQABAAABAQDACh1rt2DXfV3hk6fszSQcQ/rueMId0kVD9U7nl8cfEnFxqOCrNT92g4laQIGl2mn8lsGZfTLg8ksHq3gkvgO3oo/0wHy4v32JeBOHTsN5AL4gfHNEhWeWb50ev47hnTsRIt9P4dxogeUo/hTu7j9+s9lLpEQXCvq6xocXQt0j8MV9qZBBXFLXVT3cWIkSqOdwt/5ZBg+1GSrc7WfCXVWgTk4a20uPMuJPxU4RQwZW6X3+O8Pqo8C3cW0OzZRFP6gUYUKUsTI5WntlS+LAxgw1mZNsozFGdbiOPRnEryE3SRldh9vjDR3tin1fGpA5P7+CEB/bqaXtG3V+F2OkqaMNAAAAAAAAAAAAAAABAAAACHRlc3RjZXJ0AAAAAAAAAAAAAAAA//////////8AAABLAAAADWZvcmNlLWNvbW1hbmQAAAAOAAAACi9iaW4vc2xlZXAAAAAOc291cmNlLWFkZHJlc3MAAAASAAAADjE5Mi4xNjguMS4wLzI0AAAAggAAABVwZXJtaXQtWDExLWZvcndhcmRpbmcAAAAAAAAAF3Blcm1pdC1hZ2VudC1mb3J3YXJkaW5nAAAAAAAAABZwZXJtaXQtcG9ydC1mb3J3YXJkaW5nAAAAAAAAAApwZXJtaXQtcHR5AAAAAAAAAA5wZXJtaXQtdXNlci1yYwAAAAAAAAAAAAABFwAAAAdzc2gtcnNhAAAAAwEAAQAAAQEAwU+c5ui5A8+J/CFpjW8wCa52bEODA808WWQDCSuTG/eMXNf59v9Y8Pk0F1E9dGCosSNyVcB/hacUrc6He+i97+HJCyKavBsE6GDxrjRyxYqAlfcOXi/IVmaUGiO8OQ39d4GHrjToInKvExSUeleQyH4Y4/e27T/pILAqPFL3fyrvMLT5qU9QyIt6zIpa7GBP5+urouNavMprV3zsfIqNBbWypinOQAw823a5wN+zwXnhZrgQiHZ/USG09Y6k98y1dTVz8YHlQVR4D3lpTAsKDKJ5hCH9WU4fdf+lU8OyNGaJ/vz0XNqxcToe1l4numLTnaoSuH89pHryjqurB7lJKwAAAQ8AAAAHc3NoLXJzYQAAAQCaHvUIoPL1zWUHIXLvu96/HU1s/i4CAW2IIEuGgxCUCiFj6vyTyYtgxQxcmbfZf6eaITlS6XJZa7Qq4iaFZh75C1DXTX8labXhRSD4E2t//AIP9MC1rtQC5xo6FmbQ+BoKcDskr+mNACcbRSxs3IL3bwCfWDnIw2WbVox9ZdcthJKk4UoCW4ix4QwdHw7zlddlz++fGEEVhmTbll1SUkycGApPFBsAYRTMupUJcYPIeReBI/m8XfkoMk99bV8ZJQTAd7OekHY2/48Ff53jLmyDjP7kNw1F8OaPtkFs6dGJXta4krmaekPy87j+35In5hFj7yoOqvSbmYUkeX70/GGQ` - -func TestParseCertWithOptions(t *testing.T) { - opts := map[string]string{ - "source-address": "192.168.1.0/24", - "force-command": "/bin/sleep", - } - exts := map[string]string{ - "permit-X11-forwarding": "", - "permit-agent-forwarding": "", - "permit-port-forwarding": "", - "permit-pty": "", - "permit-user-rc": "", - } - authKeyBytes := []byte(exampleSSHCertWithOptions) - - key, _, _, rest, err := ParseAuthorizedKey(authKeyBytes) - if err != nil { - t.Fatalf("ParseAuthorizedKey: %v", err) - } - if len(rest) > 0 { - t.Errorf("rest: got %q, want empty", rest) - } - cert, ok := key.(*Certificate) - if !ok { - t.Fatalf("got %v (%T), want *Certificate", key, key) - } - if !reflect.DeepEqual(cert.CriticalOptions, opts) { - t.Errorf("unexpected critical options - got %v, want %v", cert.CriticalOptions, opts) - } - if !reflect.DeepEqual(cert.Extensions, exts) { - t.Errorf("unexpected Extensions - got %v, want %v", cert.Extensions, exts) - } - marshaled := MarshalAuthorizedKey(key) - // Before comparison, remove the trailing newline that - // MarshalAuthorizedKey adds. 
- marshaled = marshaled[:len(marshaled)-1] - if !bytes.Equal(authKeyBytes, marshaled) { - t.Errorf("marshaled certificate does not match original: got %q, want %q", marshaled, authKeyBytes) - } -} - -func TestValidateCert(t *testing.T) { - key, _, _, _, err := ParseAuthorizedKey([]byte(exampleSSHCert)) - if err != nil { - t.Fatalf("ParseAuthorizedKey: %v", err) - } - validCert, ok := key.(*Certificate) - if !ok { - t.Fatalf("got %v (%T), want *Certificate", key, key) - } - checker := CertChecker{} - checker.IsUserAuthority = func(k PublicKey) bool { - return bytes.Equal(k.Marshal(), validCert.SignatureKey.Marshal()) - } - - if err := checker.CheckCert("user", validCert); err != nil { - t.Errorf("Unable to validate certificate: %v", err) - } - invalidCert := &Certificate{ - Key: testPublicKeys["rsa"], - SignatureKey: testPublicKeys["ecdsa"], - ValidBefore: CertTimeInfinity, - Signature: &Signature{}, - } - if err := checker.CheckCert("user", invalidCert); err == nil { - t.Error("Invalid cert signature passed validation") - } -} - -func TestValidateCertTime(t *testing.T) { - cert := Certificate{ - ValidPrincipals: []string{"user"}, - Key: testPublicKeys["rsa"], - ValidAfter: 50, - ValidBefore: 100, - } - - cert.SignCert(rand.Reader, testSigners["ecdsa"]) - - for ts, ok := range map[int64]bool{ - 25: false, - 50: true, - 99: true, - 100: false, - 125: false, - } { - checker := CertChecker{ - Clock: func() time.Time { return time.Unix(ts, 0) }, - } - checker.IsUserAuthority = func(k PublicKey) bool { - return bytes.Equal(k.Marshal(), - testPublicKeys["ecdsa"].Marshal()) - } - - if v := checker.CheckCert("user", &cert); (v == nil) != ok { - t.Errorf("Authenticate(%d): %v", ts, v) - } - } -} - -// TODO(hanwen): tests for -// -// host keys: -// * fallbacks - -func TestHostKeyCert(t *testing.T) { - cert := &Certificate{ - ValidPrincipals: []string{"hostname", "hostname.domain", "otherhost"}, - Key: testPublicKeys["rsa"], - ValidBefore: CertTimeInfinity, - CertType: HostCert, - } - cert.SignCert(rand.Reader, testSigners["ecdsa"]) - - checker := &CertChecker{ - IsHostAuthority: func(p PublicKey, addr string) bool { - return addr == "hostname:22" && bytes.Equal(testPublicKeys["ecdsa"].Marshal(), p.Marshal()) - }, - } - - certSigner, err := NewCertSigner(cert, testSigners["rsa"]) - if err != nil { - t.Errorf("NewCertSigner: %v", err) - } - - for _, test := range []struct { - addr string - succeed bool - }{ - {addr: "hostname:22", succeed: true}, - {addr: "otherhost:22", succeed: false}, // The certificate is valid for 'otherhost' as hostname, but we only recognize the authority of the signer for the address 'hostname:22' - {addr: "lasthost:22", succeed: false}, - } { - c1, c2, err := netPipe() - if err != nil { - t.Fatalf("netPipe: %v", err) - } - defer c1.Close() - defer c2.Close() - - errc := make(chan error) - - go func() { - conf := ServerConfig{ - NoClientAuth: true, - } - conf.AddHostKey(certSigner) - _, _, _, err := NewServerConn(c1, &conf) - errc <- err - }() - - config := &ClientConfig{ - User: "user", - HostKeyCallback: checker.CheckHostKey, - } - _, _, _, err = NewClientConn(c2, test.addr, config) - - if (err == nil) != test.succeed { - t.Fatalf("NewClientConn(%q): %v", test.addr, err) - } - - err = <-errc - if (err == nil) != test.succeed { - t.Fatalf("NewServerConn(%q): %v", test.addr, err) - } - } -} - -func TestCertTypes(t *testing.T) { - var testVars = []struct { - name string - keys func() Signer - }{ - { - name: CertAlgoECDSA256v01, - keys: func() Signer { - s, _ := 
ParsePrivateKey(testdata.PEMBytes["ecdsap256"]) - return s - }, - }, - { - name: CertAlgoECDSA384v01, - keys: func() Signer { - s, _ := ParsePrivateKey(testdata.PEMBytes["ecdsap384"]) - return s - }, - }, - { - name: CertAlgoECDSA521v01, - keys: func() Signer { - s, _ := ParsePrivateKey(testdata.PEMBytes["ecdsap521"]) - return s - }, - }, - { - name: CertAlgoED25519v01, - keys: func() Signer { - s, _ := ParsePrivateKey(testdata.PEMBytes["ed25519"]) - return s - }, - }, - { - name: CertAlgoRSAv01, - keys: func() Signer { - s, _ := ParsePrivateKey(testdata.PEMBytes["rsa"]) - return s - }, - }, - { - name: CertAlgoDSAv01, - keys: func() Signer { - s, _ := ParsePrivateKey(testdata.PEMBytes["dsa"]) - return s - }, - }, - } - - k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - t.Fatalf("error generating host key: %v", err) - } - - signer, err := NewSignerFromKey(k) - if err != nil { - t.Fatalf("error generating signer for ssh listener: %v", err) - } - - conf := &ServerConfig{ - PublicKeyCallback: func(c ConnMetadata, k PublicKey) (*Permissions, error) { - return new(Permissions), nil - }, - } - conf.AddHostKey(signer) - - for _, m := range testVars { - t.Run(m.name, func(t *testing.T) { - - c1, c2, err := netPipe() - if err != nil { - t.Fatalf("netPipe: %v", err) - } - defer c1.Close() - defer c2.Close() - - go NewServerConn(c1, conf) - - priv := m.keys() - if err != nil { - t.Fatalf("error generating ssh pubkey: %v", err) - } - - cert := &Certificate{ - CertType: UserCert, - Key: priv.PublicKey(), - } - cert.SignCert(rand.Reader, priv) - - certSigner, err := NewCertSigner(cert, priv) - if err != nil { - t.Fatalf("error generating cert signer: %v", err) - } - - config := &ClientConfig{ - User: "user", - HostKeyCallback: func(h string, r net.Addr, k PublicKey) error { return nil }, - Auth: []AuthMethod{PublicKeys(certSigner)}, - } - - _, _, _, err = NewClientConn(c2, "", config) - if err != nil { - t.Fatalf("error connecting: %v", err) - } - }) - } -} diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go deleted file mode 100644 index c0834c00..00000000 --- a/vendor/golang.org/x/crypto/ssh/channel.go +++ /dev/null @@ -1,633 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "sync" -) - -const ( - minPacketLength = 9 - // channelMaxPacket contains the maximum number of bytes that will be - // sent in a single packet. As per RFC 4253, section 6.1, 32k is also - // the minimum. - channelMaxPacket = 1 << 15 - // We follow OpenSSH here. - channelWindowSize = 64 * channelMaxPacket -) - -// NewChannel represents an incoming request to a channel. It must either be -// accepted for use by calling Accept, or rejected by calling Reject. -type NewChannel interface { - // Accept accepts the channel creation request. It returns the Channel - // and a Go channel containing SSH requests. The Go channel must be - // serviced otherwise the Channel will hang. - Accept() (Channel, <-chan *Request, error) - - // Reject rejects the channel creation request. After calling - // this, no other methods on the Channel may be called. - Reject(reason RejectionReason, message string) error - - // ChannelType returns the type of the channel, as supplied by the - // client. 
- ChannelType() string - - // ExtraData returns the arbitrary payload for this channel, as supplied - // by the client. This data is specific to the channel type. - ExtraData() []byte -} - -// A Channel is an ordered, reliable, flow-controlled, duplex stream -// that is multiplexed over an SSH connection. -type Channel interface { - // Read reads up to len(data) bytes from the channel. - Read(data []byte) (int, error) - - // Write writes len(data) bytes to the channel. - Write(data []byte) (int, error) - - // Close signals end of channel use. No data may be sent after this - // call. - Close() error - - // CloseWrite signals the end of sending in-band - // data. Requests may still be sent, and the other side may - // still send data - CloseWrite() error - - // SendRequest sends a channel request. If wantReply is true, - // it will wait for a reply and return the result as a - // boolean, otherwise the return value will be false. Channel - // requests are out-of-band messages so they may be sent even - // if the data stream is closed or blocked by flow control. - // If the channel is closed before a reply is returned, io.EOF - // is returned. - SendRequest(name string, wantReply bool, payload []byte) (bool, error) - - // Stderr returns an io.ReadWriter that writes to this channel - // with the extended data type set to stderr. Stderr may - // safely be read and written from a different goroutine than - // Read and Write respectively. - Stderr() io.ReadWriter -} - -// Request is a request sent outside of the normal stream of -// data. Requests can either be specific to an SSH channel, or they -// can be global. -type Request struct { - Type string - WantReply bool - Payload []byte - - ch *channel - mux *mux -} - -// Reply sends a response to a request. It must be called for all requests -// where WantReply is true and is a no-op otherwise. The payload argument is -// ignored for replies to channel-specific requests. -func (r *Request) Reply(ok bool, payload []byte) error { - if !r.WantReply { - return nil - } - - if r.ch == nil { - return r.mux.ackRequest(ok, payload) - } - - return r.ch.ackRequest(ok) -} - -// RejectionReason is an enumeration used when rejecting channel creation -// requests. See RFC 4254, section 5.1. -type RejectionReason uint32 - -const ( - Prohibited RejectionReason = iota + 1 - ConnectionFailed - UnknownChannelType - ResourceShortage -) - -// String converts the rejection reason to human readable form. -func (r RejectionReason) String() string { - switch r { - case Prohibited: - return "administratively prohibited" - case ConnectionFailed: - return "connect failed" - case UnknownChannelType: - return "unknown channel type" - case ResourceShortage: - return "resource shortage" - } - return fmt.Sprintf("unknown reason %d", int(r)) -} - -func min(a uint32, b int) uint32 { - if a < uint32(b) { - return a - } - return uint32(b) -} - -type channelDirection uint8 - -const ( - channelInbound channelDirection = iota - channelOutbound -) - -// channel is an implementation of the Channel interface that works -// with the mux class. -type channel struct { - // R/O after creation - chanType string - extraData []byte - localId, remoteId uint32 - - // maxIncomingPayload and maxRemotePayload are the maximum - // payload sizes of normal and extended data packets for - // receiving and sending, respectively. The wire packet will - // be 9 or 13 bytes larger (excluding encryption overhead). 
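The Accept/Reject contract is easiest to see from the server side. A rough sketch of servicing the NewChannel and Request streams — the echo behaviour is only illustrative:

	import (
		"io"
		"log"

		"golang.org/x/crypto/ssh"
	)

	// serviceChannels accepts "session" channels, answers their requests, and
	// echoes any data written to them back to the peer.
	func serviceChannels(chans <-chan ssh.NewChannel) {
		for newCh := range chans {
			if newCh.ChannelType() != "session" {
				newCh.Reject(ssh.UnknownChannelType, "only session channels are supported")
				continue
			}
			ch, requests, err := newCh.Accept()
			if err != nil {
				log.Printf("accept: %v", err)
				continue
			}
			// The request channel must be serviced or the Channel will hang.
			go func(in <-chan *ssh.Request) {
				for req := range in {
					// Acknowledge shell/exec requests, refuse everything else.
					req.Reply(req.Type == "shell" || req.Type == "exec", nil)
				}
			}(requests)
			go func(c ssh.Channel) {
				defer c.Close()
				io.Copy(c, c)
			}(ch)
		}
	}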
- maxIncomingPayload uint32 - maxRemotePayload uint32 - - mux *mux - - // decided is set to true if an accept or reject message has been sent - // (for outbound channels) or received (for inbound channels). - decided bool - - // direction contains either channelOutbound, for channels created - // locally, or channelInbound, for channels created by the peer. - direction channelDirection - - // Pending internal channel messages. - msg chan interface{} - - // Since requests have no ID, there can be only one request - // with WantReply=true outstanding. This lock is held by a - // goroutine that has such an outgoing request pending. - sentRequestMu sync.Mutex - - incomingRequests chan *Request - - sentEOF bool - - // thread-safe data - remoteWin window - pending *buffer - extPending *buffer - - // windowMu protects myWindow, the flow-control window. - windowMu sync.Mutex - myWindow uint32 - - // writeMu serializes calls to mux.conn.writePacket() and - // protects sentClose and packetPool. This mutex must be - // different from windowMu, as writePacket can block if there - // is a key exchange pending. - writeMu sync.Mutex - sentClose bool - - // packetPool has a buffer for each extended channel ID to - // save allocations during writes. - packetPool map[uint32][]byte -} - -// writePacket sends a packet. If the packet is a channel close, it updates -// sentClose. This method takes the lock c.writeMu. -func (ch *channel) writePacket(packet []byte) error { - ch.writeMu.Lock() - if ch.sentClose { - ch.writeMu.Unlock() - return io.EOF - } - ch.sentClose = (packet[0] == msgChannelClose) - err := ch.mux.conn.writePacket(packet) - ch.writeMu.Unlock() - return err -} - -func (ch *channel) sendMessage(msg interface{}) error { - if debugMux { - log.Printf("send(%d): %#v", ch.mux.chanList.offset, msg) - } - - p := Marshal(msg) - binary.BigEndian.PutUint32(p[1:], ch.remoteId) - return ch.writePacket(p) -} - -// WriteExtended writes data to a specific extended stream. These streams are -// used, for example, for stderr. -func (ch *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) { - if ch.sentEOF { - return 0, io.EOF - } - // 1 byte message type, 4 bytes remoteId, 4 bytes data length - opCode := byte(msgChannelData) - headerLength := uint32(9) - if extendedCode > 0 { - headerLength += 4 - opCode = msgChannelExtendedData - } - - ch.writeMu.Lock() - packet := ch.packetPool[extendedCode] - // We don't remove the buffer from packetPool, so - // WriteExtended calls from different goroutines will be - // flagged as errors by the race detector. 
- ch.writeMu.Unlock() - - for len(data) > 0 { - space := min(ch.maxRemotePayload, len(data)) - if space, err = ch.remoteWin.reserve(space); err != nil { - return n, err - } - if want := headerLength + space; uint32(cap(packet)) < want { - packet = make([]byte, want) - } else { - packet = packet[:want] - } - - todo := data[:space] - - packet[0] = opCode - binary.BigEndian.PutUint32(packet[1:], ch.remoteId) - if extendedCode > 0 { - binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode)) - } - binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo))) - copy(packet[headerLength:], todo) - if err = ch.writePacket(packet); err != nil { - return n, err - } - - n += len(todo) - data = data[len(todo):] - } - - ch.writeMu.Lock() - ch.packetPool[extendedCode] = packet - ch.writeMu.Unlock() - - return n, err -} - -func (ch *channel) handleData(packet []byte) error { - headerLen := 9 - isExtendedData := packet[0] == msgChannelExtendedData - if isExtendedData { - headerLen = 13 - } - if len(packet) < headerLen { - // malformed data packet - return parseError(packet[0]) - } - - var extended uint32 - if isExtendedData { - extended = binary.BigEndian.Uint32(packet[5:]) - } - - length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen]) - if length == 0 { - return nil - } - if length > ch.maxIncomingPayload { - // TODO(hanwen): should send Disconnect? - return errors.New("ssh: incoming packet exceeds maximum payload size") - } - - data := packet[headerLen:] - if length != uint32(len(data)) { - return errors.New("ssh: wrong packet length") - } - - ch.windowMu.Lock() - if ch.myWindow < length { - ch.windowMu.Unlock() - // TODO(hanwen): should send Disconnect with reason? - return errors.New("ssh: remote side wrote too much") - } - ch.myWindow -= length - ch.windowMu.Unlock() - - if extended == 1 { - ch.extPending.write(data) - } else if extended > 0 { - // discard other extended data. - } else { - ch.pending.write(data) - } - return nil -} - -func (c *channel) adjustWindow(n uint32) error { - c.windowMu.Lock() - // Since myWindow is managed on our side, and can never exceed - // the initial window setting, we don't worry about overflow. - c.myWindow += uint32(n) - c.windowMu.Unlock() - return c.sendMessage(windowAdjustMsg{ - AdditionalBytes: uint32(n), - }) -} - -func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) { - switch extended { - case 1: - n, err = c.extPending.Read(data) - case 0: - n, err = c.pending.Read(data) - default: - return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended) - } - - if n > 0 { - err = c.adjustWindow(uint32(n)) - // sendWindowAdjust can return io.EOF if the remote - // peer has closed the connection, however we want to - // defer forwarding io.EOF to the caller of Read until - // the buffer has been drained. - if n > 0 && err == io.EOF { - err = nil - } - } - - return n, err -} - -func (c *channel) close() { - c.pending.eof() - c.extPending.eof() - close(c.msg) - close(c.incomingRequests) - c.writeMu.Lock() - // This is not necessary for a normal channel teardown, but if - // there was another error, it is. - c.sentClose = true - c.writeMu.Unlock() - // Unblock writers. - c.remoteWin.close() -} - -// responseMessageReceived is called when a success or failure message is -// received on a channel to check that such a message is reasonable for the -// given channel. 
-func (ch *channel) responseMessageReceived() error { - if ch.direction == channelInbound { - return errors.New("ssh: channel response message received on inbound channel") - } - if ch.decided { - return errors.New("ssh: duplicate response received for channel") - } - ch.decided = true - return nil -} - -func (ch *channel) handlePacket(packet []byte) error { - switch packet[0] { - case msgChannelData, msgChannelExtendedData: - return ch.handleData(packet) - case msgChannelClose: - ch.sendMessage(channelCloseMsg{PeersID: ch.remoteId}) - ch.mux.chanList.remove(ch.localId) - ch.close() - return nil - case msgChannelEOF: - // RFC 4254 is mute on how EOF affects dataExt messages but - // it is logical to signal EOF at the same time. - ch.extPending.eof() - ch.pending.eof() - return nil - } - - decoded, err := decode(packet) - if err != nil { - return err - } - - switch msg := decoded.(type) { - case *channelOpenFailureMsg: - if err := ch.responseMessageReceived(); err != nil { - return err - } - ch.mux.chanList.remove(msg.PeersID) - ch.msg <- msg - case *channelOpenConfirmMsg: - if err := ch.responseMessageReceived(); err != nil { - return err - } - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize) - } - ch.remoteId = msg.MyID - ch.maxRemotePayload = msg.MaxPacketSize - ch.remoteWin.add(msg.MyWindow) - ch.msg <- msg - case *windowAdjustMsg: - if !ch.remoteWin.add(msg.AdditionalBytes) { - return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes) - } - case *channelRequestMsg: - req := Request{ - Type: msg.Request, - WantReply: msg.WantReply, - Payload: msg.RequestSpecificData, - ch: ch, - } - - ch.incomingRequests <- &req - default: - ch.msg <- msg - } - return nil -} - -func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel { - ch := &channel{ - remoteWin: window{Cond: newCond()}, - myWindow: channelWindowSize, - pending: newBuffer(), - extPending: newBuffer(), - direction: direction, - incomingRequests: make(chan *Request, chanSize), - msg: make(chan interface{}, chanSize), - chanType: chanType, - extraData: extraData, - mux: m, - packetPool: make(map[uint32][]byte), - } - ch.localId = m.chanList.add(ch) - return ch -} - -var errUndecided = errors.New("ssh: must Accept or Reject channel") -var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once") - -type extChannel struct { - code uint32 - ch *channel -} - -func (e *extChannel) Write(data []byte) (n int, err error) { - return e.ch.WriteExtended(data, e.code) -} - -func (e *extChannel) Read(data []byte) (n int, err error) { - return e.ch.ReadExtended(data, e.code) -} - -func (ch *channel) Accept() (Channel, <-chan *Request, error) { - if ch.decided { - return nil, nil, errDecidedAlready - } - ch.maxIncomingPayload = channelMaxPacket - confirm := channelOpenConfirmMsg{ - PeersID: ch.remoteId, - MyID: ch.localId, - MyWindow: ch.myWindow, - MaxPacketSize: ch.maxIncomingPayload, - } - ch.decided = true - if err := ch.sendMessage(confirm); err != nil { - return nil, nil, err - } - - return ch, ch.incomingRequests, nil -} - -func (ch *channel) Reject(reason RejectionReason, message string) error { - if ch.decided { - return errDecidedAlready - } - reject := channelOpenFailureMsg{ - PeersID: ch.remoteId, - Reason: reason, - Message: message, - Language: "en", - } - ch.decided = true - return ch.sendMessage(reject) -} - -func (ch *channel) Read(data []byte) (int, 
error) { - if !ch.decided { - return 0, errUndecided - } - return ch.ReadExtended(data, 0) -} - -func (ch *channel) Write(data []byte) (int, error) { - if !ch.decided { - return 0, errUndecided - } - return ch.WriteExtended(data, 0) -} - -func (ch *channel) CloseWrite() error { - if !ch.decided { - return errUndecided - } - ch.sentEOF = true - return ch.sendMessage(channelEOFMsg{ - PeersID: ch.remoteId}) -} - -func (ch *channel) Close() error { - if !ch.decided { - return errUndecided - } - - return ch.sendMessage(channelCloseMsg{ - PeersID: ch.remoteId}) -} - -// Extended returns an io.ReadWriter that sends and receives data on the given, -// SSH extended stream. Such streams are used, for example, for stderr. -func (ch *channel) Extended(code uint32) io.ReadWriter { - if !ch.decided { - return nil - } - return &extChannel{code, ch} -} - -func (ch *channel) Stderr() io.ReadWriter { - return ch.Extended(1) -} - -func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - if !ch.decided { - return false, errUndecided - } - - if wantReply { - ch.sentRequestMu.Lock() - defer ch.sentRequestMu.Unlock() - } - - msg := channelRequestMsg{ - PeersID: ch.remoteId, - Request: name, - WantReply: wantReply, - RequestSpecificData: payload, - } - - if err := ch.sendMessage(msg); err != nil { - return false, err - } - - if wantReply { - m, ok := (<-ch.msg) - if !ok { - return false, io.EOF - } - switch m.(type) { - case *channelRequestFailureMsg: - return false, nil - case *channelRequestSuccessMsg: - return true, nil - default: - return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m) - } - } - - return false, nil -} - -// ackRequest either sends an ack or nack to the channel request. -func (ch *channel) ackRequest(ok bool) error { - if !ch.decided { - return errUndecided - } - - var msg interface{} - if !ok { - msg = channelRequestFailureMsg{ - PeersID: ch.remoteId, - } - } else { - msg = channelRequestSuccessMsg{ - PeersID: ch.remoteId, - } - } - return ch.sendMessage(msg) -} - -func (ch *channel) ChannelType() string { - return ch.chanType -} - -func (ch *channel) ExtraData() []byte { - return ch.extraData -} diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go deleted file mode 100644 index 67b01261..00000000 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ /dev/null @@ -1,770 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/rc4" - "crypto/subtle" - "encoding/binary" - "errors" - "fmt" - "hash" - "io" - "io/ioutil" - "math/bits" - - "golang.org/x/crypto/internal/chacha20" - "golang.org/x/crypto/poly1305" -) - -const ( - packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher. - - // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations - // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC - // indicates implementations SHOULD be able to handle larger packet sizes, but then - // waffles on about reasonable limits. - // - // OpenSSH caps their maxPacket at 256kB so we choose to do - // the same. maxPacket is also used to ensure that uint32 - // length fields do not overflow, so it should remain well - // below 4G. - maxPacket = 256 * 1024 -) - -// noneCipher implements cipher.Stream and provides no encryption. 
It is used -// by the transport before the first key-exchange. -type noneCipher struct{} - -func (c noneCipher) XORKeyStream(dst, src []byte) { - copy(dst, src) -} - -func newAESCTR(key, iv []byte) (cipher.Stream, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - return cipher.NewCTR(c, iv), nil -} - -func newRC4(key, iv []byte) (cipher.Stream, error) { - return rc4.NewCipher(key) -} - -type cipherMode struct { - keySize int - ivSize int - create func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) -} - -func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, error)) func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - return func(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - stream, err := createFunc(key, iv) - if err != nil { - return nil, err - } - - var streamDump []byte - if skip > 0 { - streamDump = make([]byte, 512) - } - - for remainingToDump := skip; remainingToDump > 0; { - dumpThisTime := remainingToDump - if dumpThisTime > len(streamDump) { - dumpThisTime = len(streamDump) - } - stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime]) - remainingToDump -= dumpThisTime - } - - mac := macModes[algs.MAC].new(macKey) - return &streamPacketCipher{ - mac: mac, - etm: macModes[algs.MAC].etm, - macResult: make([]byte, mac.Size()), - cipher: stream, - }, nil - } -} - -// cipherModes documents properties of supported ciphers. Ciphers not included -// are not supported and will not be negotiated, even if explicitly requested in -// ClientConfig.Crypto.Ciphers. -var cipherModes = map[string]*cipherMode{ - // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms - // are defined in the order specified in the RFC. - "aes128-ctr": {16, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - "aes192-ctr": {24, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - "aes256-ctr": {32, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - - // Ciphers from RFC4345, which introduces security-improved arcfour ciphers. - // They are defined in the order specified in the RFC. - "arcfour128": {16, 0, streamCipherMode(1536, newRC4)}, - "arcfour256": {32, 0, streamCipherMode(1536, newRC4)}, - - // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. - // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and - // RC4) has problems with weak keys, and should be used with caution." - // RFC4345 introduces improved versions of Arcfour. - "arcfour": {16, 0, streamCipherMode(0, newRC4)}, - - // AEAD ciphers - gcmCipherID: {16, 12, newGCMCipher}, - chacha20Poly1305ID: {64, 0, newChaCha20Cipher}, - - // CBC mode is insecure and so is not included in the default config. - // (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely - // needed, it's possible to specify a custom Config to enable it. - // You should expect that an active attacker can recover plaintext if - // you do. - aes128cbcID: {16, aes.BlockSize, newAESCBCCipher}, - - // 3des-cbc is insecure and is not included in the default - // config. - tripledescbcID: {24, des.BlockSize, newTripleDESCBCCipher}, -} - -// prefixLen is the length of the packet prefix that contains the packet length -// and number of padding bytes. -const prefixLen = 5 - -// streamPacketCipher is a packetCipher using a stream cipher. 
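Because only the algorithms in the cipherModes table can be negotiated, callers who need to tighten (or, at their own risk, widen) the cipher list do so through the Ciphers field of the shared Config embedded in ClientConfig and ServerConfig. A hedged sketch, assuming that field name (Config itself is defined outside this hunk):

	import "golang.org/x/crypto/ssh"

	// newRestrictedConfig returns a ClientConfig that will only negotiate the
	// listed ciphers; the names must match keys of the cipherModes table.
	func newRestrictedConfig(user string, auth []ssh.AuthMethod, hostKey ssh.HostKeyCallback) *ssh.ClientConfig {
		cfg := &ssh.ClientConfig{
			User:            user,
			Auth:            auth,
			HostKeyCallback: hostKey,
		}
		// Ciphers is promoted from the embedded Config; algorithms left out
		// (for example the CBC modes) are simply never offered.
		cfg.Ciphers = []string{
			"chacha20-poly1305@openssh.com",
			"aes256-ctr",
			"aes128-ctr",
		}
		return cfg
	}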
-type streamPacketCipher struct { - mac hash.Hash - cipher cipher.Stream - etm bool - - // The following members are to avoid per-packet allocations. - prefix [prefixLen]byte - seqNumBytes [4]byte - padding [2 * packetSizeMultiple]byte - packetData []byte - macResult []byte -} - -// readPacket reads and decrypt a single packet from the reader argument. -func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, s.prefix[:]); err != nil { - return nil, err - } - - var encryptedPaddingLength [1]byte - if s.mac != nil && s.etm { - copy(encryptedPaddingLength[:], s.prefix[4:5]) - s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) - } else { - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - } - - length := binary.BigEndian.Uint32(s.prefix[0:4]) - paddingLength := uint32(s.prefix[4]) - - var macSize uint32 - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - if s.etm { - s.mac.Write(s.prefix[:4]) - s.mac.Write(encryptedPaddingLength[:]) - } else { - s.mac.Write(s.prefix[:]) - } - macSize = uint32(s.mac.Size()) - } - - if length <= paddingLength+1 { - return nil, errors.New("ssh: invalid packet length, packet too small") - } - - if length > maxPacket { - return nil, errors.New("ssh: invalid packet length, packet too large") - } - - // the maxPacket check above ensures that length-1+macSize - // does not overflow. - if uint32(cap(s.packetData)) < length-1+macSize { - s.packetData = make([]byte, length-1+macSize) - } else { - s.packetData = s.packetData[:length-1+macSize] - } - - if _, err := io.ReadFull(r, s.packetData); err != nil { - return nil, err - } - mac := s.packetData[length-1:] - data := s.packetData[:length-1] - - if s.mac != nil && s.etm { - s.mac.Write(data) - } - - s.cipher.XORKeyStream(data, data) - - if s.mac != nil { - if !s.etm { - s.mac.Write(data) - } - s.macResult = s.mac.Sum(s.macResult[:0]) - if subtle.ConstantTimeCompare(s.macResult, mac) != 1 { - return nil, errors.New("ssh: MAC failure") - } - } - - return s.packetData[:length-paddingLength-1], nil -} - -// writePacket encrypts and sends a packet of data to the writer argument -func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - if len(packet) > maxPacket { - return errors.New("ssh: packet too large") - } - - aadlen := 0 - if s.mac != nil && s.etm { - // packet length is not encrypted for EtM modes - aadlen = 4 - } - - paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple - if paddingLength < 4 { - paddingLength += packetSizeMultiple - } - - length := len(packet) + 1 + paddingLength - binary.BigEndian.PutUint32(s.prefix[:], uint32(length)) - s.prefix[4] = byte(paddingLength) - padding := s.padding[:paddingLength] - if _, err := io.ReadFull(rand, padding); err != nil { - return err - } - - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - - if s.etm { - // For EtM algorithms, the packet length must stay unencrypted, - // but the following data (padding length) must be encrypted - s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) - } - - s.mac.Write(s.prefix[:]) - - if !s.etm { - // For non-EtM algorithms, the algorithm is applied on unencrypted data - s.mac.Write(packet) - s.mac.Write(padding) - } - } - - if !(s.mac != nil && s.etm) { - // For EtM algorithms, the padding length has already been encrypted - // and the packet 
length must remain unencrypted - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - } - - s.cipher.XORKeyStream(packet, packet) - s.cipher.XORKeyStream(padding, padding) - - if s.mac != nil && s.etm { - // For EtM algorithms, packet and padding must be encrypted - s.mac.Write(packet) - s.mac.Write(padding) - } - - if _, err := w.Write(s.prefix[:]); err != nil { - return err - } - if _, err := w.Write(packet); err != nil { - return err - } - if _, err := w.Write(padding); err != nil { - return err - } - - if s.mac != nil { - s.macResult = s.mac.Sum(s.macResult[:0]) - if _, err := w.Write(s.macResult); err != nil { - return err - } - } - - return nil -} - -type gcmCipher struct { - aead cipher.AEAD - prefix [4]byte - iv []byte - buf []byte -} - -func newGCMCipher(key, iv, unusedMacKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - aead, err := cipher.NewGCM(c) - if err != nil { - return nil, err - } - - return &gcmCipher{ - aead: aead, - iv: iv, - }, nil -} - -const gcmTagSize = 16 - -func (c *gcmCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - // Pad out to multiple of 16 bytes. This is different from the - // stream cipher because that encrypts the length too. - padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple) - if padding < 4 { - padding += packetSizeMultiple - } - - length := uint32(len(packet) + int(padding) + 1) - binary.BigEndian.PutUint32(c.prefix[:], length) - if _, err := w.Write(c.prefix[:]); err != nil { - return err - } - - if cap(c.buf) < int(length) { - c.buf = make([]byte, length) - } else { - c.buf = c.buf[:length] - } - - c.buf[0] = padding - copy(c.buf[1:], packet) - if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil { - return err - } - c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if _, err := w.Write(c.buf); err != nil { - return err - } - c.incIV() - - return nil -} - -func (c *gcmCipher) incIV() { - for i := 4 + 7; i >= 4; i-- { - c.iv[i]++ - if c.iv[i] != 0 { - break - } - } -} - -func (c *gcmCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, c.prefix[:]); err != nil { - return nil, err - } - length := binary.BigEndian.Uint32(c.prefix[:]) - if length > maxPacket { - return nil, errors.New("ssh: max packet length exceeded") - } - - if cap(c.buf) < int(length+gcmTagSize) { - c.buf = make([]byte, length+gcmTagSize) - } else { - c.buf = c.buf[:length+gcmTagSize] - } - - if _, err := io.ReadFull(r, c.buf); err != nil { - return nil, err - } - - plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if err != nil { - return nil, err - } - c.incIV() - - padding := plain[0] - if padding < 4 { - // padding is a byte, so it automatically satisfies - // the maximum size, which is 255. - return nil, fmt.Errorf("ssh: illegal padding %d", padding) - } - - if int(padding+1) >= len(plain) { - return nil, fmt.Errorf("ssh: padding %d too large", padding) - } - plain = plain[1 : length-uint32(padding)] - return plain, nil -} - -// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 -type cbcCipher struct { - mac hash.Hash - macSize uint32 - decrypter cipher.BlockMode - encrypter cipher.BlockMode - - // The following members are to avoid per-packet allocations. - seqNumBytes [4]byte - packetData []byte - macResult []byte - - // Amount of data we should still read to hide which - // verification error triggered. 
- oracleCamouflage uint32 -} - -func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - cbc := &cbcCipher{ - mac: macModes[algs.MAC].new(macKey), - decrypter: cipher.NewCBCDecrypter(c, iv), - encrypter: cipher.NewCBCEncrypter(c, iv), - packetData: make([]byte, 1024), - } - if cbc.mac != nil { - cbc.macSize = uint32(cbc.mac.Size()) - } - - return cbc, nil -} - -func newAESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, key, iv, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func newTripleDESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := des.NewTripleDESCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, key, iv, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func maxUInt32(a, b int) uint32 { - if a > b { - return uint32(a) - } - return uint32(b) -} - -const ( - cbcMinPacketSizeMultiple = 8 - cbcMinPacketSize = 16 - cbcMinPaddingSize = 4 -) - -// cbcError represents a verification error that may leak information. -type cbcError string - -func (e cbcError) Error() string { return string(e) } - -func (c *cbcCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { - p, err := c.readPacketLeaky(seqNum, r) - if err != nil { - if _, ok := err.(cbcError); ok { - // Verification error: read a fixed amount of - // data, to make distinguishing between - // failing MAC and failing length check more - // difficult. - io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage)) - } - } - return p, err -} - -func (c *cbcCipher) readPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { - blockSize := c.decrypter.BlockSize() - - // Read the header, which will include some of the subsequent data in the - // case of block ciphers - this is copied back to the payload later. - // How many bytes of payload/padding will be read with this first read. - firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) - firstBlock := c.packetData[:firstBlockLength] - if _, err := io.ReadFull(r, firstBlock); err != nil { - return nil, err - } - - c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength - - c.decrypter.CryptBlocks(firstBlock, firstBlock) - length := binary.BigEndian.Uint32(firstBlock[:4]) - if length > maxPacket { - return nil, cbcError("ssh: packet too large") - } - if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { - // The minimum size of a packet is 16 (or the cipher block size, whichever - // is larger) bytes. - return nil, cbcError("ssh: packet too small") - } - // The length of the packet (including the length field but not the MAC) must - // be a multiple of the block size or 8, whichever is larger. - if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { - return nil, cbcError("ssh: invalid packet length multiple") - } - - paddingLength := uint32(firstBlock[4]) - if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { - return nil, cbcError("ssh: invalid packet length") - } - - // Positions within the c.packetData buffer: - macStart := 4 + length - paddingStart := macStart - paddingLength - - // Entire packet size, starting before length, ending at end of mac. - entirePacketSize := macStart + c.macSize - - // Ensure c.packetData is large enough for the entire packet data. 
- if uint32(cap(c.packetData)) < entirePacketSize { - // Still need to upsize and copy, but this should be rare at runtime, only - // on upsizing the packetData buffer. - c.packetData = make([]byte, entirePacketSize) - copy(c.packetData, firstBlock) - } else { - c.packetData = c.packetData[:entirePacketSize] - } - - n, err := io.ReadFull(r, c.packetData[firstBlockLength:]) - if err != nil { - return nil, err - } - c.oracleCamouflage -= uint32(n) - - remainingCrypted := c.packetData[firstBlockLength:macStart] - c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) - - mac := c.packetData[macStart:] - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData[:macStart]) - c.macResult = c.mac.Sum(c.macResult[:0]) - if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { - return nil, cbcError("ssh: MAC failure") - } - } - - return c.packetData[prefixLen:paddingStart], nil -} - -func (c *cbcCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) - - // Length of encrypted portion of the packet (header, payload, padding). - // Enforce minimum padding and packet size. - encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) - // Enforce block size. - encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize - - length := encLength - 4 - paddingLength := int(length) - (1 + len(packet)) - - // Overall buffer contains: header, payload, padding, mac. - // Space for the MAC is reserved in the capacity but not the slice length. - bufferSize := encLength + c.macSize - if uint32(cap(c.packetData)) < bufferSize { - c.packetData = make([]byte, encLength, bufferSize) - } else { - c.packetData = c.packetData[:encLength] - } - - p := c.packetData - - // Packet header. - binary.BigEndian.PutUint32(p, length) - p = p[4:] - p[0] = byte(paddingLength) - - // Payload. - p = p[1:] - copy(p, packet) - - // Padding. - p = p[len(packet):] - if _, err := io.ReadFull(rand, p); err != nil { - return err - } - - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData) - // The MAC is now appended into the capacity reserved for it earlier. - c.packetData = c.mac.Sum(c.packetData) - } - - c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength]) - - if _, err := w.Write(c.packetData); err != nil { - return err - } - - return nil -} - -const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" - -// chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com -// AEAD, which is described here: -// -// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 -// -// the methods here also implement padding, which RFC4253 Section 6 -// also requires of stream ciphers. 
-type chacha20Poly1305Cipher struct { - lengthKey [8]uint32 - contentKey [8]uint32 - buf []byte -} - -func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { - if len(key) != 64 { - panic(len(key)) - } - - c := &chacha20Poly1305Cipher{ - buf: make([]byte, 256), - } - - for i := range c.contentKey { - c.contentKey[i] = binary.LittleEndian.Uint32(key[i*4 : (i+1)*4]) - } - for i := range c.lengthKey { - c.lengthKey[i] = binary.LittleEndian.Uint32(key[(i+8)*4 : (i+9)*4]) - } - return c, nil -} - -func (c *chacha20Poly1305Cipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { - nonce := [3]uint32{0, 0, bits.ReverseBytes32(seqNum)} - s := chacha20.New(c.contentKey, nonce) - var polyKey [32]byte - s.XORKeyStream(polyKey[:], polyKey[:]) - s.Advance() // skip next 32 bytes - - encryptedLength := c.buf[:4] - if _, err := io.ReadFull(r, encryptedLength); err != nil { - return nil, err - } - - var lenBytes [4]byte - chacha20.New(c.lengthKey, nonce).XORKeyStream(lenBytes[:], encryptedLength) - - length := binary.BigEndian.Uint32(lenBytes[:]) - if length > maxPacket { - return nil, errors.New("ssh: invalid packet length, packet too large") - } - - contentEnd := 4 + length - packetEnd := contentEnd + poly1305.TagSize - if uint32(cap(c.buf)) < packetEnd { - c.buf = make([]byte, packetEnd) - copy(c.buf[:], encryptedLength) - } else { - c.buf = c.buf[:packetEnd] - } - - if _, err := io.ReadFull(r, c.buf[4:packetEnd]); err != nil { - return nil, err - } - - var mac [poly1305.TagSize]byte - copy(mac[:], c.buf[contentEnd:packetEnd]) - if !poly1305.Verify(&mac, c.buf[:contentEnd], &polyKey) { - return nil, errors.New("ssh: MAC failure") - } - - plain := c.buf[4:contentEnd] - s.XORKeyStream(plain, plain) - - padding := plain[0] - if padding < 4 { - // padding is a byte, so it automatically satisfies - // the maximum size, which is 255. - return nil, fmt.Errorf("ssh: illegal padding %d", padding) - } - - if int(padding)+1 >= len(plain) { - return nil, fmt.Errorf("ssh: padding %d too large", padding) - } - - plain = plain[1 : len(plain)-int(padding)] - - return plain, nil -} - -func (c *chacha20Poly1305Cipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, payload []byte) error { - nonce := [3]uint32{0, 0, bits.ReverseBytes32(seqNum)} - s := chacha20.New(c.contentKey, nonce) - var polyKey [32]byte - s.XORKeyStream(polyKey[:], polyKey[:]) - s.Advance() // skip next 32 bytes - - // There is no blocksize, so fall back to multiple of 8 byte - // padding, as described in RFC 4253, Sec 6. - const packetSizeMultiple = 8 - - padding := packetSizeMultiple - (1+len(payload))%packetSizeMultiple - if padding < 4 { - padding += packetSizeMultiple - } - - // size (4 bytes), padding (1), payload, padding, tag. 
- totalLength := 4 + 1 + len(payload) + padding + poly1305.TagSize - if cap(c.buf) < totalLength { - c.buf = make([]byte, totalLength) - } else { - c.buf = c.buf[:totalLength] - } - - binary.BigEndian.PutUint32(c.buf, uint32(1+len(payload)+padding)) - chacha20.New(c.lengthKey, nonce).XORKeyStream(c.buf, c.buf[:4]) - c.buf[4] = byte(padding) - copy(c.buf[5:], payload) - packetEnd := 5 + len(payload) + padding - if _, err := io.ReadFull(rand, c.buf[5+len(payload):packetEnd]); err != nil { - return err - } - - s.XORKeyStream(c.buf[4:], c.buf[4:packetEnd]) - - var mac [poly1305.TagSize]byte - poly1305.Sum(&mac, c.buf[:packetEnd], &polyKey) - - copy(c.buf[packetEnd:], mac[:]) - - if _, err := w.Write(c.buf); err != nil { - return err - } - return nil -} diff --git a/vendor/golang.org/x/crypto/ssh/cipher_test.go b/vendor/golang.org/x/crypto/ssh/cipher_test.go deleted file mode 100644 index a52d6e48..00000000 --- a/vendor/golang.org/x/crypto/ssh/cipher_test.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "crypto" - "crypto/rand" - "testing" -) - -func TestDefaultCiphersExist(t *testing.T) { - for _, cipherAlgo := range supportedCiphers { - if _, ok := cipherModes[cipherAlgo]; !ok { - t.Errorf("supported cipher %q is unknown", cipherAlgo) - } - } - for _, cipherAlgo := range preferredCiphers { - if _, ok := cipherModes[cipherAlgo]; !ok { - t.Errorf("preferred cipher %q is unknown", cipherAlgo) - } - } -} - -func TestPacketCiphers(t *testing.T) { - defaultMac := "hmac-sha2-256" - defaultCipher := "aes128-ctr" - for cipher := range cipherModes { - t.Run("cipher="+cipher, - func(t *testing.T) { testPacketCipher(t, cipher, defaultMac) }) - } - for mac := range macModes { - t.Run("mac="+mac, - func(t *testing.T) { testPacketCipher(t, defaultCipher, mac) }) - } -} - -func testPacketCipher(t *testing.T, cipher, mac string) { - kr := &kexResult{Hash: crypto.SHA1} - algs := directionAlgorithms{ - Cipher: cipher, - MAC: mac, - Compression: "none", - } - client, err := newPacketCipher(clientKeys, algs, kr) - if err != nil { - t.Fatalf("newPacketCipher(client, %q, %q): %v", cipher, mac, err) - } - server, err := newPacketCipher(clientKeys, algs, kr) - if err != nil { - t.Fatalf("newPacketCipher(client, %q, %q): %v", cipher, mac, err) - } - - want := "bla bla" - input := []byte(want) - buf := &bytes.Buffer{} - if err := client.writePacket(0, buf, rand.Reader, input); err != nil { - t.Fatalf("writePacket(%q, %q): %v", cipher, mac, err) - } - - packet, err := server.readPacket(0, buf) - if err != nil { - t.Fatalf("readPacket(%q, %q): %v", cipher, mac, err) - } - - if string(packet) != want { - t.Errorf("roundtrip(%q, %q): got %q, want %q", cipher, mac, packet, want) - } -} - -func TestCBCOracleCounterMeasure(t *testing.T) { - kr := &kexResult{Hash: crypto.SHA1} - algs := directionAlgorithms{ - Cipher: aes128cbcID, - MAC: "hmac-sha1", - Compression: "none", - } - client, err := newPacketCipher(clientKeys, algs, kr) - if err != nil { - t.Fatalf("newPacketCipher(client): %v", err) - } - - want := "bla bla" - input := []byte(want) - buf := &bytes.Buffer{} - if err := client.writePacket(0, buf, rand.Reader, input); err != nil { - t.Errorf("writePacket: %v", err) - } - - packetSize := buf.Len() - buf.Write(make([]byte, 2*maxPacket)) - - // We corrupt each byte, but this usually will only test the - // 'packet too large' or 'MAC 
failure' cases. - lastRead := -1 - for i := 0; i < packetSize; i++ { - server, err := newPacketCipher(clientKeys, algs, kr) - if err != nil { - t.Fatalf("newPacketCipher(client): %v", err) - } - - fresh := &bytes.Buffer{} - fresh.Write(buf.Bytes()) - fresh.Bytes()[i] ^= 0x01 - - before := fresh.Len() - _, err = server.readPacket(0, fresh) - if err == nil { - t.Errorf("corrupt byte %d: readPacket succeeded ", i) - continue - } - if _, ok := err.(cbcError); !ok { - t.Errorf("corrupt byte %d: got %v (%T), want cbcError", i, err, err) - continue - } - - after := fresh.Len() - bytesRead := before - after - if bytesRead < maxPacket { - t.Errorf("corrupt byte %d: read %d bytes, want more than %d", i, bytesRead, maxPacket) - continue - } - - if i > 0 && bytesRead != lastRead { - t.Errorf("corrupt byte %d: read %d bytes, want %d bytes read", i, bytesRead, lastRead) - } - lastRead = bytesRead - } -} diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go deleted file mode 100644 index ae6ca775..00000000 --- a/vendor/golang.org/x/crypto/ssh/client.go +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "net" - "os" - "sync" - "time" -) - -// Client implements a traditional SSH client that supports shells, -// subprocesses, TCP port/streamlocal forwarding and tunneled dialing. -type Client struct { - Conn - - handleForwardsOnce sync.Once // guards calling (*Client).handleForwards - - forwards forwardList // forwarded tcpip connections from the remote side - mu sync.Mutex - channelHandlers map[string]chan NewChannel -} - -// HandleChannelOpen returns a channel on which NewChannel requests -// for the given type are sent. If the type already is being handled, -// nil is returned. The channel is closed when the connection is closed. -func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel { - c.mu.Lock() - defer c.mu.Unlock() - if c.channelHandlers == nil { - // The SSH channel has been closed. - c := make(chan NewChannel) - close(c) - return c - } - - ch := c.channelHandlers[channelType] - if ch != nil { - return nil - } - - ch = make(chan NewChannel, chanSize) - c.channelHandlers[channelType] = ch - return ch -} - -// NewClient creates a Client on top of the given connection. -func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client { - conn := &Client{ - Conn: c, - channelHandlers: make(map[string]chan NewChannel, 1), - } - - go conn.handleGlobalRequests(reqs) - go conn.handleChannelOpens(chans) - go func() { - conn.Wait() - conn.forwards.closeAll() - }() - return conn -} - -// NewClientConn establishes an authenticated SSH connection using c -// as the underlying transport. The Request and NewChannel channels -// must be serviced or the connection will hang. 
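When the convenience Dial wrapper below is too coarse — for example to reuse an existing net.Conn or to watch the raw channel and request streams — the same pieces can be assembled by hand. A sketch:

	import (
		"net"
		"time"

		"golang.org/x/crypto/ssh"
	)

	// dialManually establishes the TCP connection itself and then runs the SSH
	// handshake over it with NewClientConn.
	func dialManually(addr string, cfg *ssh.ClientConfig) (*ssh.Client, error) {
		conn, err := net.DialTimeout("tcp", addr, 10*time.Second)
		if err != nil {
			return nil, err
		}
		c, chans, reqs, err := ssh.NewClientConn(conn, addr, cfg)
		if err != nil {
			return nil, err
		}
		// NewClient takes ownership of chans and reqs and keeps them drained.
		return ssh.NewClient(c, chans, reqs), nil
	}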
-func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - if fullConf.HostKeyCallback == nil { - c.Close() - return nil, nil, nil, errors.New("ssh: must specify HostKeyCallback") - } - - conn := &connection{ - sshConn: sshConn{conn: c}, - } - - if err := conn.clientHandshake(addr, &fullConf); err != nil { - c.Close() - return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err) - } - conn.mux = newMux(conn.transport) - return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil -} - -// clientHandshake performs the client side key exchange. See RFC 4253 Section -// 7. -func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error { - if config.ClientVersion != "" { - c.clientVersion = []byte(config.ClientVersion) - } else { - c.clientVersion = []byte(packageVersion) - } - var err error - c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion) - if err != nil { - return err - } - - c.transport = newClientTransport( - newTransport(c.sshConn.conn, config.Rand, true /* is client */), - c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr()) - if err := c.transport.waitSession(); err != nil { - return err - } - - c.sessionID = c.transport.getSessionID() - return c.clientAuthenticate(config) -} - -// verifyHostKeySignature verifies the host key obtained in the key -// exchange. -func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error { - sig, rest, ok := parseSignatureBody(result.Signature) - if len(rest) > 0 || !ok { - return errors.New("ssh: signature parse error") - } - - return hostKey.Verify(result.H, sig) -} - -// NewSession opens a new Session for this client. (A session is a remote -// execution of a program.) -func (c *Client) NewSession() (*Session, error) { - ch, in, err := c.OpenChannel("session", nil) - if err != nil { - return nil, err - } - return newSession(ch, in) -} - -func (c *Client) handleGlobalRequests(incoming <-chan *Request) { - for r := range incoming { - // This handles keepalive messages and matches - // the behaviour of OpenSSH. - r.Reply(false, nil) - } -} - -// handleChannelOpens channel open messages from the remote side. -func (c *Client) handleChannelOpens(in <-chan NewChannel) { - for ch := range in { - c.mu.Lock() - handler := c.channelHandlers[ch.ChannelType()] - c.mu.Unlock() - - if handler != nil { - handler <- ch - } else { - ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType())) - } - } - - c.mu.Lock() - for _, ch := range c.channelHandlers { - close(ch) - } - c.channelHandlers = nil - c.mu.Unlock() -} - -// Dial starts a client connection to the given SSH server. It is a -// convenience function that connects to the given network address, -// initiates the SSH handshake, and then sets up a Client. For access -// to incoming channels and requests, use net.Dial with NewClientConn -// instead. -func Dial(network, addr string, config *ClientConfig) (*Client, error) { - conn, err := net.DialTimeout(network, addr, config.Timeout) - if err != nil { - return nil, err - } - c, chans, reqs, err := NewClientConn(conn, addr, config) - if err != nil { - return nil, err - } - return NewClient(c, chans, reqs), nil -} - -// HostKeyCallback is the function type used for verifying server -// keys. A HostKeyCallback must return nil if the host key is OK, or -// an error to reject it. 
It receives the hostname as passed to Dial -// or NewClientConn. The remote address is the RemoteAddr of the -// net.Conn underlying the the SSH connection. -type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error - -// BannerCallback is the function type used for treat the banner sent by -// the server. A BannerCallback receives the message sent by the remote server. -type BannerCallback func(message string) error - -// A ClientConfig structure is used to configure a Client. It must not be -// modified after having been passed to an SSH function. -type ClientConfig struct { - // Config contains configuration that is shared between clients and - // servers. - Config - - // User contains the username to authenticate as. - User string - - // Auth contains possible authentication methods to use with the - // server. Only the first instance of a particular RFC 4252 method will - // be used during authentication. - Auth []AuthMethod - - // HostKeyCallback is called during the cryptographic - // handshake to validate the server's host key. The client - // configuration must supply this callback for the connection - // to succeed. The functions InsecureIgnoreHostKey or - // FixedHostKey can be used for simplistic host key checks. - HostKeyCallback HostKeyCallback - - // BannerCallback is called during the SSH dance to display a custom - // server's message. The client configuration can supply this callback to - // handle it as wished. The function BannerDisplayStderr can be used for - // simplistic display on Stderr. - BannerCallback BannerCallback - - // ClientVersion contains the version identification string that will - // be used for the connection. If empty, a reasonable default is used. - ClientVersion string - - // HostKeyAlgorithms lists the key types that the client will - // accept from the server as host key, in order of - // preference. If empty, a reasonable default is used. Any - // string returned from PublicKey.Type method may be used, or - // any of the CertAlgoXxxx and KeyAlgoXxxx constants. - HostKeyAlgorithms []string - - // Timeout is the maximum amount of time for the TCP connection to establish. - // - // A Timeout of zero means no timeout. - Timeout time.Duration -} - -// InsecureIgnoreHostKey returns a function that can be used for -// ClientConfig.HostKeyCallback to accept any host key. It should -// not be used for production code. -func InsecureIgnoreHostKey() HostKeyCallback { - return func(hostname string, remote net.Addr, key PublicKey) error { - return nil - } -} - -type fixedHostKey struct { - key PublicKey -} - -func (f *fixedHostKey) check(hostname string, remote net.Addr, key PublicKey) error { - if f.key == nil { - return fmt.Errorf("ssh: required host key was nil") - } - if !bytes.Equal(key.Marshal(), f.key.Marshal()) { - return fmt.Errorf("ssh: host key mismatch") - } - return nil -} - -// FixedHostKey returns a function for use in -// ClientConfig.HostKeyCallback to accept only a specific host key. -func FixedHostKey(key PublicKey) HostKeyCallback { - hk := &fixedHostKey{key} - return hk.check -} - -// BannerDisplayStderr returns a function that can be used for -// ClientConfig.BannerCallback to display banners on os.Stderr. 
-func BannerDisplayStderr() BannerCallback { - return func(banner string) error { - _, err := os.Stderr.WriteString(banner) - - return err - } -} diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go deleted file mode 100644 index 5f44b774..00000000 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" -) - -type authResult int - -const ( - authFailure authResult = iota - authPartialSuccess - authSuccess -) - -// clientAuthenticate authenticates with the remote server. See RFC 4252. -func (c *connection) clientAuthenticate(config *ClientConfig) error { - // initiate user auth session - if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { - return err - } - packet, err := c.transport.readPacket() - if err != nil { - return err - } - var serviceAccept serviceAcceptMsg - if err := Unmarshal(packet, &serviceAccept); err != nil { - return err - } - - // during the authentication phase the client first attempts the "none" method - // then any untried methods suggested by the server. - tried := make(map[string]bool) - var lastMethods []string - - sessionID := c.transport.getSessionID() - for auth := AuthMethod(new(noneAuth)); auth != nil; { - ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand) - if err != nil { - return err - } - if ok == authSuccess { - // success - return nil - } else if ok == authFailure { - tried[auth.method()] = true - } - if methods == nil { - methods = lastMethods - } - lastMethods = methods - - auth = nil - - findNext: - for _, a := range config.Auth { - candidateMethod := a.method() - if tried[candidateMethod] { - continue - } - for _, meth := range methods { - if meth == candidateMethod { - auth = a - break findNext - } - } - } - } - return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried)) -} - -func keys(m map[string]bool) []string { - s := make([]string, 0, len(m)) - - for key := range m { - s = append(s, key) - } - return s -} - -// An AuthMethod represents an instance of an RFC 4252 authentication method. -type AuthMethod interface { - // auth authenticates user over transport t. - // Returns true if authentication is successful. - // If authentication is not successful, a []string of alternative - // method names is returned. If the slice is nil, it will be ignored - // and the previous set of possible methods will be reused. - auth(session []byte, user string, p packetConn, rand io.Reader) (authResult, []string, error) - - // method returns the RFC 4252 method name. - method() string -} - -// "none" authentication, RFC 4252 section 5.2. -type noneAuth int - -func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - if err := c.writePacket(Marshal(&userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: "none", - })); err != nil { - return authFailure, nil, err - } - - return handleAuthResponse(c) -} - -func (n *noneAuth) method() string { - return "none" -} - -// passwordCallback is an AuthMethod that fetches the password through -// a function call, e.g. by prompting the user. 
-type passwordCallback func() (password string, err error) - -func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - type passwordAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - Reply bool - Password string - } - - pw, err := cb() - // REVIEW NOTE: is there a need to support skipping a password attempt? - // The program may only find out that the user doesn't have a password - // when prompting. - if err != nil { - return authFailure, nil, err - } - - if err := c.writePacket(Marshal(&passwordAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - Reply: false, - Password: pw, - })); err != nil { - return authFailure, nil, err - } - - return handleAuthResponse(c) -} - -func (cb passwordCallback) method() string { - return "password" -} - -// Password returns an AuthMethod using the given password. -func Password(secret string) AuthMethod { - return passwordCallback(func() (string, error) { return secret, nil }) -} - -// PasswordCallback returns an AuthMethod that uses a callback for -// fetching a password. -func PasswordCallback(prompt func() (secret string, err error)) AuthMethod { - return passwordCallback(prompt) -} - -type publickeyAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - // HasSig indicates to the receiver packet that the auth request is signed and - // should be used for authentication of the request. - HasSig bool - Algoname string - PubKey []byte - // Sig is tagged with "rest" so Marshal will exclude it during - // validateKey - Sig []byte `ssh:"rest"` -} - -// publicKeyCallback is an AuthMethod that uses a set of key -// pairs for authentication. -type publicKeyCallback func() ([]Signer, error) - -func (cb publicKeyCallback) method() string { - return "publickey" -} - -func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - // Authentication is performed by sending an enquiry to test if a key is - // acceptable to the remote. If the key is acceptable, the client will - // attempt to authenticate with the valid key. If not the client will repeat - // the process with the remaining keys. - - signers, err := cb() - if err != nil { - return authFailure, nil, err - } - var methods []string - for _, signer := range signers { - ok, err := validateKey(signer.PublicKey(), user, c) - if err != nil { - return authFailure, nil, err - } - if !ok { - continue - } - - pub := signer.PublicKey() - pubKey := pub.Marshal() - sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - }, []byte(pub.Type()), pubKey)) - if err != nil { - return authFailure, nil, err - } - - // manually wrap the serialized signature in a string - s := Marshal(sign) - sig := make([]byte, stringLength(len(s))) - marshalString(sig, s) - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - HasSig: true, - Algoname: pub.Type(), - PubKey: pubKey, - Sig: sig, - } - p := Marshal(&msg) - if err := c.writePacket(p); err != nil { - return authFailure, nil, err - } - var success authResult - success, methods, err = handleAuthResponse(c) - if err != nil { - return authFailure, nil, err - } - - // If authentication succeeds or the list of available methods does not - // contain the "publickey" method, do not attempt to authenticate with any - // other keys. 
According to RFC 4252 Section 7, the latter can occur when - // additional authentication methods are required. - if success == authSuccess || !containsMethod(methods, cb.method()) { - return success, methods, err - } - } - - return authFailure, methods, nil -} - -func containsMethod(methods []string, method string) bool { - for _, m := range methods { - if m == method { - return true - } - } - - return false -} - -// validateKey validates the key provided is acceptable to the server. -func validateKey(key PublicKey, user string, c packetConn) (bool, error) { - pubKey := key.Marshal() - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: "publickey", - HasSig: false, - Algoname: key.Type(), - PubKey: pubKey, - } - if err := c.writePacket(Marshal(&msg)); err != nil { - return false, err - } - - return confirmKeyAck(key, c) -} - -func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { - pubKey := key.Marshal() - algoname := key.Type() - - for { - packet, err := c.readPacket() - if err != nil { - return false, err - } - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return false, err - } - case msgUserAuthPubKeyOk: - var msg userAuthPubKeyOkMsg - if err := Unmarshal(packet, &msg); err != nil { - return false, err - } - if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) { - return false, nil - } - return true, nil - case msgUserAuthFailure: - return false, nil - default: - return false, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - } -} - -// PublicKeys returns an AuthMethod that uses the given key -// pairs. -func PublicKeys(signers ...Signer) AuthMethod { - return publicKeyCallback(func() ([]Signer, error) { return signers, nil }) -} - -// PublicKeysCallback returns an AuthMethod that runs the given -// function to obtain a list of key pairs. -func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod { - return publicKeyCallback(getSigners) -} - -// handleAuthResponse returns whether the preceding authentication request succeeded -// along with a list of remaining authentication methods to try next and -// an error if an unexpected response was received. -func handleAuthResponse(c packetConn) (authResult, []string, error) { - for { - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return authFailure, nil, err - } - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthSuccess: - return authSuccess, nil, nil - default: - return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - } -} - -func handleBannerResponse(c packetConn, packet []byte) error { - var msg userAuthBannerMsg - if err := Unmarshal(packet, &msg); err != nil { - return err - } - - transport, ok := c.(*handshakeTransport) - if !ok { - return nil - } - - if transport.bannerCallback != nil { - return transport.bannerCallback(msg.Message) - } - - return nil -} - -// KeyboardInteractiveChallenge should print questions, optionally -// disabling echoing (e.g. for passwords), and return all the answers. -// Challenge may be called multiple times in a single session. 
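(Editorial aside, not part of the vendor diff above: the deleted client_auth.go implements the RFC 4252 client flow — try "none" first, then walk the configured AuthMethods as the server's failure responses allow. Below is a minimal, hedged sketch of how a consumer of this vendored package would combine the public-key and keyboard-interactive methods shown here; the host, user, key path, and prompt handling are placeholder assumptions, not taken from the diff.)

package main

import (
	"io/ioutil"
	"log"
	"time"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Public-key method: load and parse a PEM private key (path is a placeholder).
	keyBytes, err := ioutil.ReadFile("/home/user/.ssh/id_rsa")
	if err != nil {
		log.Fatalf("read key: %v", err)
	}
	signer, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		log.Fatalf("parse key: %v", err)
	}

	// Keyboard-interactive method: a real client would prompt the user for each
	// question; this stub just returns empty answers to keep the sketch short.
	kb := func(user, instruction string, questions []string, echos []bool) ([]string, error) {
		return make([]string, len(questions)), nil
	}

	config := &ssh.ClientConfig{
		User: "testuser",
		Auth: []ssh.AuthMethod{
			ssh.PublicKeys(signer),                                  // tried first
			ssh.RetryableAuthMethod(ssh.KeyboardInteractive(kb), 3), // then up to 3 interactive attempts
		},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // placeholder only; verify host keys in real code
		Timeout:         10 * time.Second,
	}

	client, err := ssh.Dial("tcp", "example.com:22", config)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer client.Close()
}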
After -// successful authentication, the server may send a challenge with no -// questions, for which the user and instruction messages should be -// printed. RFC 4256 section 3.3 details how the UI should behave for -// both CLI and GUI environments. -type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error) - -// KeyboardInteractive returns an AuthMethod using a prompt/response -// sequence controlled by the server. -func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod { - return challenge -} - -func (cb KeyboardInteractiveChallenge) method() string { - return "keyboard-interactive" -} - -func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - type initiateMsg struct { - User string `sshtype:"50"` - Service string - Method string - Language string - Submethods string - } - - if err := c.writePacket(Marshal(&initiateMsg{ - User: user, - Service: serviceSSH, - Method: "keyboard-interactive", - })); err != nil { - return authFailure, nil, err - } - - for { - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - - // like handleAuthResponse, but with less options. - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return authFailure, nil, err - } - continue - case msgUserAuthInfoRequest: - // OK - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthSuccess: - return authSuccess, nil, nil - default: - return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) - } - - var msg userAuthInfoRequestMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - - // Manually unpack the prompt/echo pairs. 
- rest := msg.Prompts - var prompts []string - var echos []bool - for i := 0; i < int(msg.NumPrompts); i++ { - prompt, r, ok := parseString(rest) - if !ok || len(r) == 0 { - return authFailure, nil, errors.New("ssh: prompt format error") - } - prompts = append(prompts, string(prompt)) - echos = append(echos, r[0] != 0) - rest = r[1:] - } - - if len(rest) != 0 { - return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs") - } - - answers, err := cb(msg.User, msg.Instruction, prompts, echos) - if err != nil { - return authFailure, nil, err - } - - if len(answers) != len(prompts) { - return authFailure, nil, errors.New("ssh: not enough answers from keyboard-interactive callback") - } - responseLength := 1 + 4 - for _, a := range answers { - responseLength += stringLength(len(a)) - } - serialized := make([]byte, responseLength) - p := serialized - p[0] = msgUserAuthInfoResponse - p = p[1:] - p = marshalUint32(p, uint32(len(answers))) - for _, a := range answers { - p = marshalString(p, []byte(a)) - } - - if err := c.writePacket(serialized); err != nil { - return authFailure, nil, err - } - } -} - -type retryableAuthMethod struct { - authMethod AuthMethod - maxTries int -} - -func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok authResult, methods []string, err error) { - for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { - ok, methods, err = r.authMethod.auth(session, user, c, rand) - if ok != authFailure || err != nil { // either success, partial success or error terminate - return ok, methods, err - } - } - return ok, methods, err -} - -func (r *retryableAuthMethod) method() string { - return r.authMethod.method() -} - -// RetryableAuthMethod is a decorator for other auth methods enabling them to -// be retried up to maxTries before considering that AuthMethod itself failed. -// If maxTries is <= 0, will retry indefinitely -// -// This is useful for interactive clients using challenge/response type -// authentication (e.g. Keyboard-Interactive, Password, etc) where the user -// could mistype their response resulting in the server issuing a -// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4 -// [keyboard-interactive]); Without this decorator, the non-retryable -// AuthMethod would be removed from future consideration, and never tried again -// (and so the user would never be able to retry their entry). -func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod { - return &retryableAuthMethod{authMethod: auth, maxTries: maxTries} -} diff --git a/vendor/golang.org/x/crypto/ssh/client_auth_test.go b/vendor/golang.org/x/crypto/ssh/client_auth_test.go deleted file mode 100644 index 5fbb20d8..00000000 --- a/vendor/golang.org/x/crypto/ssh/client_auth_test.go +++ /dev/null @@ -1,628 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ssh - -import ( - "bytes" - "crypto/rand" - "errors" - "fmt" - "os" - "strings" - "testing" -) - -type keyboardInteractive map[string]string - -func (cr keyboardInteractive) Challenge(user string, instruction string, questions []string, echos []bool) ([]string, error) { - var answers []string - for _, q := range questions { - answers = append(answers, cr[q]) - } - return answers, nil -} - -// reused internally by tests -var clientPassword = "tiger" - -// tryAuth runs a handshake with a given config against an SSH server -// with config serverConfig -func tryAuth(t *testing.T, config *ClientConfig) error { - c1, c2, err := netPipe() - if err != nil { - t.Fatalf("netPipe: %v", err) - } - defer c1.Close() - defer c2.Close() - - certChecker := CertChecker{ - IsUserAuthority: func(k PublicKey) bool { - return bytes.Equal(k.Marshal(), testPublicKeys["ecdsa"].Marshal()) - }, - UserKeyFallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) { - if conn.User() == "testuser" && bytes.Equal(key.Marshal(), testPublicKeys["rsa"].Marshal()) { - return nil, nil - } - - return nil, fmt.Errorf("pubkey for %q not acceptable", conn.User()) - }, - IsRevoked: func(c *Certificate) bool { - return c.Serial == 666 - }, - } - - serverConfig := &ServerConfig{ - PasswordCallback: func(conn ConnMetadata, pass []byte) (*Permissions, error) { - if conn.User() == "testuser" && string(pass) == clientPassword { - return nil, nil - } - return nil, errors.New("password auth failed") - }, - PublicKeyCallback: certChecker.Authenticate, - KeyboardInteractiveCallback: func(conn ConnMetadata, challenge KeyboardInteractiveChallenge) (*Permissions, error) { - ans, err := challenge("user", - "instruction", - []string{"question1", "question2"}, - []bool{true, true}) - if err != nil { - return nil, err - } - ok := conn.User() == "testuser" && ans[0] == "answer1" && ans[1] == "answer2" - if ok { - challenge("user", "motd", nil, nil) - return nil, nil - } - return nil, errors.New("keyboard-interactive failed") - }, - } - serverConfig.AddHostKey(testSigners["rsa"]) - - go newServer(c1, serverConfig) - _, _, _, err = NewClientConn(c2, "", config) - return err -} - -func TestClientAuthPublicKey(t *testing.T) { - config := &ClientConfig{ - User: "testuser", - Auth: []AuthMethod{ - PublicKeys(testSigners["rsa"]), - }, - HostKeyCallback: InsecureIgnoreHostKey(), - } - if err := tryAuth(t, config); err != nil { - t.Fatalf("unable to dial remote side: %s", err) - } -} - -func TestAuthMethodPassword(t *testing.T) { - config := &ClientConfig{ - User: "testuser", - Auth: []AuthMethod{ - Password(clientPassword), - }, - HostKeyCallback: InsecureIgnoreHostKey(), - } - - if err := tryAuth(t, config); err != nil { - t.Fatalf("unable to dial remote side: %s", err) - } -} - -func TestAuthMethodFallback(t *testing.T) { - var passwordCalled bool - config := &ClientConfig{ - User: "testuser", - Auth: []AuthMethod{ - PublicKeys(testSigners["rsa"]), - PasswordCallback( - func() (string, error) { - passwordCalled = true - return "WRONG", nil - }), - }, - HostKeyCallback: InsecureIgnoreHostKey(), - } - - if err := tryAuth(t, config); err != nil { - t.Fatalf("unable to dial remote side: %s", err) - } - - if passwordCalled { - t.Errorf("password auth tried before public-key auth.") - } -} - -func TestAuthMethodWrongPassword(t *testing.T) { - config := &ClientConfig{ - User: "testuser", - Auth: []AuthMethod{ - Password("wrong"), - PublicKeys(testSigners["rsa"]), - }, - HostKeyCallback: InsecureIgnoreHostKey(), - } - - if err := 
tryAuth(t, config); err != nil { - t.Fatalf("unable to dial remote side: %s", err) - } -} - -func TestAuthMethodKeyboardInteractive(t *testing.T) { - answers := keyboardInteractive(map[string]string{ - "question1": "answer1", - "question2": "answer2", - }) - config := &ClientConfig{ - User: "testuser", - Auth: []AuthMethod{ - KeyboardInteractive(answers.Challenge), - }, - HostKeyCallback: InsecureIgnoreHostKey(), - } - - if err := tryAuth(t, config); err != nil { - t.Fatalf("unable to dial remote side: %s", err) - } -} - -func TestAuthMethodWrongKeyboardInteractive(t *testing.T) { - answers := keyboardInteractive(map[string]string{ - "question1": "answer1", - "question2": "WRONG", - }) - config := &ClientConfig{ - User: "testuser", - Auth: []AuthMethod{ - KeyboardInteractive(answers.Challenge), - }, - } - - if err := tryAuth(t, config); err == nil { - t.Fatalf("wrong answers should not have authenticated with KeyboardInteractive") - } -} - -// the mock server will only authenticate ssh-rsa keys -func TestAuthMethodInvalidPublicKey(t *testing.T) { - config := &ClientConfig{ - User: "testuser", - Auth: []AuthMethod{ - PublicKeys(testSigners["dsa"]), - }, - } - - if err := tryAuth(t, config); err == nil { - t.Fatalf("dsa private key should not have authenticated with rsa public key") - } -} - -// the client should authenticate with the second key -func TestAuthMethodRSAandDSA(t *testing.T) { - config := &ClientConfig{ - User: "testuser", - Auth: []AuthMethod{ - PublicKeys(testSigners["dsa"], testSigners["rsa"]), - }, - HostKeyCallback: InsecureIgnoreHostKey(), - } - if err := tryAuth(t, config); err != nil { - t.Fatalf("client could not authenticate with rsa key: %v", err) - } -} - -func TestClientHMAC(t *testing.T) { - for _, mac := range supportedMACs { - config := &ClientConfig{ - User: "testuser", - Auth: []AuthMethod{ - PublicKeys(testSigners["rsa"]), - }, - Config: Config{ - MACs: []string{mac}, - }, - HostKeyCallback: InsecureIgnoreHostKey(), - } - if err := tryAuth(t, config); err != nil { - t.Fatalf("client could not authenticate with mac algo %s: %v", mac, err) - } - } -} - -// issue 4285. 
-func TestClientUnsupportedCipher(t *testing.T) { - config := &ClientConfig{ - User: "testuser", - Auth: []AuthMethod{ - PublicKeys(), - }, - Config: Config{ - Ciphers: []string{"aes128-cbc"}, // not currently supported - }, - } - if err := tryAuth(t, config); err == nil { - t.Errorf("expected no ciphers in common") - } -} - -func TestClientUnsupportedKex(t *testing.T) { - if os.Getenv("GO_BUILDER_NAME") != "" { - t.Skip("skipping known-flaky test on the Go build dashboard; see golang.org/issue/15198") - } - config := &ClientConfig{ - User: "testuser", - Auth: []AuthMethod{ - PublicKeys(), - }, - Config: Config{ - KeyExchanges: []string{"diffie-hellman-group-exchange-sha256"}, // not currently supported - }, - HostKeyCallback: InsecureIgnoreHostKey(), - } - if err := tryAuth(t, config); err == nil || !strings.Contains(err.Error(), "common algorithm") { - t.Errorf("got %v, expected 'common algorithm'", err) - } -} - -func TestClientLoginCert(t *testing.T) { - cert := &Certificate{ - Key: testPublicKeys["rsa"], - ValidBefore: CertTimeInfinity, - CertType: UserCert, - } - cert.SignCert(rand.Reader, testSigners["ecdsa"]) - certSigner, err := NewCertSigner(cert, testSigners["rsa"]) - if err != nil { - t.Fatalf("NewCertSigner: %v", err) - } - - clientConfig := &ClientConfig{ - User: "user", - HostKeyCallback: InsecureIgnoreHostKey(), - } - clientConfig.Auth = append(clientConfig.Auth, PublicKeys(certSigner)) - - // should succeed - if err := tryAuth(t, clientConfig); err != nil { - t.Errorf("cert login failed: %v", err) - } - - // corrupted signature - cert.Signature.Blob[0]++ - if err := tryAuth(t, clientConfig); err == nil { - t.Errorf("cert login passed with corrupted sig") - } - - // revoked - cert.Serial = 666 - cert.SignCert(rand.Reader, testSigners["ecdsa"]) - if err := tryAuth(t, clientConfig); err == nil { - t.Errorf("revoked cert login succeeded") - } - cert.Serial = 1 - - // sign with wrong key - cert.SignCert(rand.Reader, testSigners["dsa"]) - if err := tryAuth(t, clientConfig); err == nil { - t.Errorf("cert login passed with non-authoritative key") - } - - // host cert - cert.CertType = HostCert - cert.SignCert(rand.Reader, testSigners["ecdsa"]) - if err := tryAuth(t, clientConfig); err == nil { - t.Errorf("cert login passed with wrong type") - } - cert.CertType = UserCert - - // principal specified - cert.ValidPrincipals = []string{"user"} - cert.SignCert(rand.Reader, testSigners["ecdsa"]) - if err := tryAuth(t, clientConfig); err != nil { - t.Errorf("cert login failed: %v", err) - } - - // wrong principal specified - cert.ValidPrincipals = []string{"fred"} - cert.SignCert(rand.Reader, testSigners["ecdsa"]) - if err := tryAuth(t, clientConfig); err == nil { - t.Errorf("cert login passed with wrong principal") - } - cert.ValidPrincipals = nil - - // added critical option - cert.CriticalOptions = map[string]string{"root-access": "yes"} - cert.SignCert(rand.Reader, testSigners["ecdsa"]) - if err := tryAuth(t, clientConfig); err == nil { - t.Errorf("cert login passed with unrecognized critical option") - } - - // allowed source address - cert.CriticalOptions = map[string]string{"source-address": "127.0.0.42/24,::42/120"} - cert.SignCert(rand.Reader, testSigners["ecdsa"]) - if err := tryAuth(t, clientConfig); err != nil { - t.Errorf("cert login with source-address failed: %v", err) - } - - // disallowed source address - cert.CriticalOptions = map[string]string{"source-address": "127.0.0.42,::42"} - cert.SignCert(rand.Reader, testSigners["ecdsa"]) - if err := tryAuth(t, clientConfig); 
err == nil { - t.Errorf("cert login with source-address succeeded") - } -} - -func testPermissionsPassing(withPermissions bool, t *testing.T) { - serverConfig := &ServerConfig{ - PublicKeyCallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) { - if conn.User() == "nopermissions" { - return nil, nil - } - return &Permissions{}, nil - }, - } - serverConfig.AddHostKey(testSigners["rsa"]) - - clientConfig := &ClientConfig{ - Auth: []AuthMethod{ - PublicKeys(testSigners["rsa"]), - }, - HostKeyCallback: InsecureIgnoreHostKey(), - } - if withPermissions { - clientConfig.User = "permissions" - } else { - clientConfig.User = "nopermissions" - } - - c1, c2, err := netPipe() - if err != nil { - t.Fatalf("netPipe: %v", err) - } - defer c1.Close() - defer c2.Close() - - go NewClientConn(c2, "", clientConfig) - serverConn, err := newServer(c1, serverConfig) - if err != nil { - t.Fatal(err) - } - if p := serverConn.Permissions; (p != nil) != withPermissions { - t.Fatalf("withPermissions is %t, but Permissions object is %#v", withPermissions, p) - } -} - -func TestPermissionsPassing(t *testing.T) { - testPermissionsPassing(true, t) -} - -func TestNoPermissionsPassing(t *testing.T) { - testPermissionsPassing(false, t) -} - -func TestRetryableAuth(t *testing.T) { - n := 0 - passwords := []string{"WRONG1", "WRONG2"} - - config := &ClientConfig{ - User: "testuser", - Auth: []AuthMethod{ - RetryableAuthMethod(PasswordCallback(func() (string, error) { - p := passwords[n] - n++ - return p, nil - }), 2), - PublicKeys(testSigners["rsa"]), - }, - HostKeyCallback: InsecureIgnoreHostKey(), - } - - if err := tryAuth(t, config); err != nil { - t.Fatalf("unable to dial remote side: %s", err) - } - if n != 2 { - t.Fatalf("Did not try all passwords") - } -} - -func ExampleRetryableAuthMethod(t *testing.T) { - user := "testuser" - NumberOfPrompts := 3 - - // Normally this would be a callback that prompts the user to answer the - // provided questions - Cb := func(user, instruction string, questions []string, echos []bool) (answers []string, err error) { - return []string{"answer1", "answer2"}, nil - } - - config := &ClientConfig{ - HostKeyCallback: InsecureIgnoreHostKey(), - User: user, - Auth: []AuthMethod{ - RetryableAuthMethod(KeyboardInteractiveChallenge(Cb), NumberOfPrompts), - }, - } - - if err := tryAuth(t, config); err != nil { - t.Fatalf("unable to dial remote side: %s", err) - } -} - -// Test if username is received on server side when NoClientAuth is used -func TestClientAuthNone(t *testing.T) { - user := "testuser" - serverConfig := &ServerConfig{ - NoClientAuth: true, - } - serverConfig.AddHostKey(testSigners["rsa"]) - - clientConfig := &ClientConfig{ - User: user, - HostKeyCallback: InsecureIgnoreHostKey(), - } - - c1, c2, err := netPipe() - if err != nil { - t.Fatalf("netPipe: %v", err) - } - defer c1.Close() - defer c2.Close() - - go NewClientConn(c2, "", clientConfig) - serverConn, err := newServer(c1, serverConfig) - if err != nil { - t.Fatalf("newServer: %v", err) - } - if serverConn.User() != user { - t.Fatalf("server: got %q, want %q", serverConn.User(), user) - } -} - -// Test if authentication attempts are limited on server when MaxAuthTries is set -func TestClientAuthMaxAuthTries(t *testing.T) { - user := "testuser" - - serverConfig := &ServerConfig{ - MaxAuthTries: 2, - PasswordCallback: func(conn ConnMetadata, pass []byte) (*Permissions, error) { - if conn.User() == "testuser" && string(pass) == "right" { - return nil, nil - } - return nil, errors.New("password auth failed") - }, 
- } - serverConfig.AddHostKey(testSigners["rsa"]) - - expectedErr := fmt.Errorf("ssh: handshake failed: %v", &disconnectMsg{ - Reason: 2, - Message: "too many authentication failures", - }) - - for tries := 2; tries < 4; tries++ { - n := tries - clientConfig := &ClientConfig{ - User: user, - Auth: []AuthMethod{ - RetryableAuthMethod(PasswordCallback(func() (string, error) { - n-- - if n == 0 { - return "right", nil - } - return "wrong", nil - }), tries), - }, - HostKeyCallback: InsecureIgnoreHostKey(), - } - - c1, c2, err := netPipe() - if err != nil { - t.Fatalf("netPipe: %v", err) - } - defer c1.Close() - defer c2.Close() - - go newServer(c1, serverConfig) - _, _, _, err = NewClientConn(c2, "", clientConfig) - if tries > 2 { - if err == nil { - t.Fatalf("client: got no error, want %s", expectedErr) - } else if err.Error() != expectedErr.Error() { - t.Fatalf("client: got %s, want %s", err, expectedErr) - } - } else { - if err != nil { - t.Fatalf("client: got %s, want no error", err) - } - } - } -} - -// Test if authentication attempts are correctly limited on server -// when more public keys are provided then MaxAuthTries -func TestClientAuthMaxAuthTriesPublicKey(t *testing.T) { - signers := []Signer{} - for i := 0; i < 6; i++ { - signers = append(signers, testSigners["dsa"]) - } - - validConfig := &ClientConfig{ - User: "testuser", - Auth: []AuthMethod{ - PublicKeys(append([]Signer{testSigners["rsa"]}, signers...)...), - }, - HostKeyCallback: InsecureIgnoreHostKey(), - } - if err := tryAuth(t, validConfig); err != nil { - t.Fatalf("unable to dial remote side: %s", err) - } - - expectedErr := fmt.Errorf("ssh: handshake failed: %v", &disconnectMsg{ - Reason: 2, - Message: "too many authentication failures", - }) - invalidConfig := &ClientConfig{ - User: "testuser", - Auth: []AuthMethod{ - PublicKeys(append(signers, testSigners["rsa"])...), - }, - HostKeyCallback: InsecureIgnoreHostKey(), - } - if err := tryAuth(t, invalidConfig); err == nil { - t.Fatalf("client: got no error, want %s", expectedErr) - } else if err.Error() != expectedErr.Error() { - t.Fatalf("client: got %s, want %s", err, expectedErr) - } -} - -// Test whether authentication errors are being properly logged if all -// authentication methods have been exhausted -func TestClientAuthErrorList(t *testing.T) { - publicKeyErr := errors.New("This is an error from PublicKeyCallback") - - clientConfig := &ClientConfig{ - Auth: []AuthMethod{ - PublicKeys(testSigners["rsa"]), - }, - HostKeyCallback: InsecureIgnoreHostKey(), - } - serverConfig := &ServerConfig{ - PublicKeyCallback: func(_ ConnMetadata, _ PublicKey) (*Permissions, error) { - return nil, publicKeyErr - }, - } - serverConfig.AddHostKey(testSigners["rsa"]) - - c1, c2, err := netPipe() - if err != nil { - t.Fatalf("netPipe: %v", err) - } - defer c1.Close() - defer c2.Close() - - go NewClientConn(c2, "", clientConfig) - _, err = newServer(c1, serverConfig) - if err == nil { - t.Fatal("newServer: got nil, expected errors") - } - - authErrs, ok := err.(*ServerAuthError) - if !ok { - t.Fatalf("errors: got %T, want *ssh.ServerAuthError", err) - } - for i, e := range authErrs.Errors { - switch i { - case 0: - if e != ErrNoAuth { - t.Fatalf("errors: got error %v, want ErrNoAuth", e) - } - case 1: - if e != publicKeyErr { - t.Fatalf("errors: got %v, want %v", e, publicKeyErr) - } - default: - t.Fatalf("errors: got %v, expected 2 errors", authErrs.Errors) - } - } -} diff --git a/vendor/golang.org/x/crypto/ssh/client_test.go b/vendor/golang.org/x/crypto/ssh/client_test.go deleted 
file mode 100644 index 81f9599e..00000000 --- a/vendor/golang.org/x/crypto/ssh/client_test.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "strings" - "testing" -) - -func TestClientVersion(t *testing.T) { - for _, tt := range []struct { - name string - version string - multiLine string - wantErr bool - }{ - { - name: "default version", - version: packageVersion, - }, - { - name: "custom version", - version: "SSH-2.0-CustomClientVersionString", - }, - { - name: "good multi line version", - version: packageVersion, - multiLine: strings.Repeat("ignored\r\n", 20), - }, - { - name: "bad multi line version", - version: packageVersion, - multiLine: "bad multi line version", - wantErr: true, - }, - { - name: "long multi line version", - version: packageVersion, - multiLine: strings.Repeat("long multi line version\r\n", 50)[:256], - wantErr: true, - }, - } { - t.Run(tt.name, func(t *testing.T) { - c1, c2, err := netPipe() - if err != nil { - t.Fatalf("netPipe: %v", err) - } - defer c1.Close() - defer c2.Close() - go func() { - if tt.multiLine != "" { - c1.Write([]byte(tt.multiLine)) - } - NewClientConn(c1, "", &ClientConfig{ - ClientVersion: tt.version, - HostKeyCallback: InsecureIgnoreHostKey(), - }) - c1.Close() - }() - conf := &ServerConfig{NoClientAuth: true} - conf.AddHostKey(testSigners["rsa"]) - conn, _, _, err := NewServerConn(c2, conf) - if err == nil == tt.wantErr { - t.Fatalf("got err %v; wantErr %t", err, tt.wantErr) - } - if tt.wantErr { - // Don't verify the version on an expected error. - return - } - if got := string(conn.ClientVersion()); got != tt.version { - t.Fatalf("got %q; want %q", got, tt.version) - } - }) - } -} - -func TestHostKeyCheck(t *testing.T) { - for _, tt := range []struct { - name string - wantError string - key PublicKey - }{ - {"no callback", "must specify HostKeyCallback", nil}, - {"correct key", "", testSigners["rsa"].PublicKey()}, - {"mismatch", "mismatch", testSigners["ecdsa"].PublicKey()}, - } { - c1, c2, err := netPipe() - if err != nil { - t.Fatalf("netPipe: %v", err) - } - defer c1.Close() - defer c2.Close() - serverConf := &ServerConfig{ - NoClientAuth: true, - } - serverConf.AddHostKey(testSigners["rsa"]) - - go NewServerConn(c1, serverConf) - clientConf := ClientConfig{ - User: "user", - } - if tt.key != nil { - clientConf.HostKeyCallback = FixedHostKey(tt.key) - } - - _, _, _, err = NewClientConn(c2, "", &clientConf) - if err != nil { - if tt.wantError == "" || !strings.Contains(err.Error(), tt.wantError) { - t.Errorf("%s: got error %q, missing %q", tt.name, err.Error(), tt.wantError) - } - } else if tt.wantError != "" { - t.Errorf("%s: succeeded, but want error string %q", tt.name, tt.wantError) - } - } -} - -func TestBannerCallback(t *testing.T) { - c1, c2, err := netPipe() - if err != nil { - t.Fatalf("netPipe: %v", err) - } - defer c1.Close() - defer c2.Close() - - serverConf := &ServerConfig{ - PasswordCallback: func(conn ConnMetadata, password []byte) (*Permissions, error) { - return &Permissions{}, nil - }, - BannerCallback: func(conn ConnMetadata) string { - return "Hello World" - }, - } - serverConf.AddHostKey(testSigners["rsa"]) - go NewServerConn(c1, serverConf) - - var receivedBanner string - var bannerCount int - clientConf := ClientConfig{ - Auth: []AuthMethod{ - Password("123"), - }, - User: "user", - HostKeyCallback: InsecureIgnoreHostKey(), - BannerCallback: 
func(message string) error { - bannerCount++ - receivedBanner = message - return nil - }, - } - - _, _, _, err = NewClientConn(c2, "", &clientConf) - if err != nil { - t.Fatal(err) - } - - if bannerCount != 1 { - t.Errorf("got %d banners; want 1", bannerCount) - } - - expected := "Hello World" - if receivedBanner != expected { - t.Fatalf("got %s; want %s", receivedBanner, expected) - } -} diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go deleted file mode 100644 index 04f3620b..00000000 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ /dev/null @@ -1,383 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/rand" - "fmt" - "io" - "math" - "sync" - - _ "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" -) - -// These are string constants in the SSH protocol. -const ( - compressionNone = "none" - serviceUserAuth = "ssh-userauth" - serviceSSH = "ssh-connection" -) - -// supportedCiphers lists ciphers we support but might not recommend. -var supportedCiphers = []string{ - "aes128-ctr", "aes192-ctr", "aes256-ctr", - "aes128-gcm@openssh.com", - chacha20Poly1305ID, - "arcfour256", "arcfour128", "arcfour", - aes128cbcID, - tripledescbcID, -} - -// preferredCiphers specifies the default preference for ciphers. -var preferredCiphers = []string{ - "aes128-gcm@openssh.com", - chacha20Poly1305ID, - "aes128-ctr", "aes192-ctr", "aes256-ctr", -} - -// supportedKexAlgos specifies the supported key-exchange algorithms in -// preference order. -var supportedKexAlgos = []string{ - kexAlgoCurve25519SHA256, - // P384 and P521 are not constant-time yet, but since we don't - // reuse ephemeral keys, using them for ECDH should be OK. - kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA1, kexAlgoDH1SHA1, -} - -// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods -// of authenticating servers) in preference order. -var supportedHostKeyAlgos = []string{ - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, - CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, - - KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, - KeyAlgoRSA, KeyAlgoDSA, - - KeyAlgoED25519, -} - -// supportedMACs specifies a default set of MAC algorithms in preference order. -// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed -// because they have reached the end of their useful life. -var supportedMACs = []string{ - "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96", -} - -var supportedCompressions = []string{compressionNone} - -// hashFuncs keeps the mapping of supported algorithms to their respective -// hashes needed for signature verification. -var hashFuncs = map[string]crypto.Hash{ - KeyAlgoRSA: crypto.SHA1, - KeyAlgoDSA: crypto.SHA1, - KeyAlgoECDSA256: crypto.SHA256, - KeyAlgoECDSA384: crypto.SHA384, - KeyAlgoECDSA521: crypto.SHA512, - CertAlgoRSAv01: crypto.SHA1, - CertAlgoDSAv01: crypto.SHA1, - CertAlgoECDSA256v01: crypto.SHA256, - CertAlgoECDSA384v01: crypto.SHA384, - CertAlgoECDSA521v01: crypto.SHA512, -} - -// unexpectedMessageError results when the SSH message that we received didn't -// match what we wanted. -func unexpectedMessageError(expected, got uint8) error { - return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected) -} - -// parseError results from a malformed SSH message. 
-func parseError(tag uint8) error { - return fmt.Errorf("ssh: parse error in message type %d", tag) -} - -func findCommon(what string, client []string, server []string) (common string, err error) { - for _, c := range client { - for _, s := range server { - if c == s { - return c, nil - } - } - } - return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server) -} - -type directionAlgorithms struct { - Cipher string - MAC string - Compression string -} - -// rekeyBytes returns a rekeying intervals in bytes. -func (a *directionAlgorithms) rekeyBytes() int64 { - // According to RFC4344 block ciphers should rekey after - // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is - // 128. - switch a.Cipher { - case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID: - return 16 * (1 << 32) - - } - - // For others, stick with RFC4253 recommendation to rekey after 1 Gb of data. - return 1 << 30 -} - -type algorithms struct { - kex string - hostKey string - w directionAlgorithms - r directionAlgorithms -} - -func findAgreedAlgorithms(clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) { - result := &algorithms{} - - result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos) - if err != nil { - return - } - - result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos) - if err != nil { - return - } - - result.w.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer) - if err != nil { - return - } - - result.r.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient) - if err != nil { - return - } - - result.w.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) - if err != nil { - return - } - - result.r.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) - if err != nil { - return - } - - result.w.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) - if err != nil { - return - } - - result.r.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient) - if err != nil { - return - } - - return result, nil -} - -// If rekeythreshold is too small, we can't make any progress sending -// stuff. -const minRekeyThreshold uint64 = 256 - -// Config contains configuration data common to both ServerConfig and -// ClientConfig. -type Config struct { - // Rand provides the source of entropy for cryptographic - // primitives. If Rand is nil, the cryptographic random reader - // in package crypto/rand will be used. - Rand io.Reader - - // The maximum number of bytes sent or received after which a - // new key is negotiated. It must be at least 256. If - // unspecified, a size suitable for the chosen cipher is used. - RekeyThreshold uint64 - - // The allowed key exchanges algorithms. If unspecified then a - // default set of algorithms is used. - KeyExchanges []string - - // The allowed cipher algorithms. If unspecified then a sensible - // default is used. - Ciphers []string - - // The allowed MAC algorithms. If unspecified then a sensible default - // is used. 
- MACs []string -} - -// SetDefaults sets sensible values for unset fields in config. This is -// exported for testing: Configs passed to SSH functions are copied and have -// default values set automatically. -func (c *Config) SetDefaults() { - if c.Rand == nil { - c.Rand = rand.Reader - } - if c.Ciphers == nil { - c.Ciphers = preferredCiphers - } - var ciphers []string - for _, c := range c.Ciphers { - if cipherModes[c] != nil { - // reject the cipher if we have no cipherModes definition - ciphers = append(ciphers, c) - } - } - c.Ciphers = ciphers - - if c.KeyExchanges == nil { - c.KeyExchanges = supportedKexAlgos - } - - if c.MACs == nil { - c.MACs = supportedMACs - } - - if c.RekeyThreshold == 0 { - // cipher specific default - } else if c.RekeyThreshold < minRekeyThreshold { - c.RekeyThreshold = minRekeyThreshold - } else if c.RekeyThreshold >= math.MaxInt64 { - // Avoid weirdness if somebody uses -1 as a threshold. - c.RekeyThreshold = math.MaxInt64 - } -} - -// buildDataSignedForAuth returns the data that is signed in order to prove -// possession of a private key. See RFC 4252, section 7. -func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte { - data := struct { - Session []byte - Type byte - User string - Service string - Method string - Sign bool - Algo []byte - PubKey []byte - }{ - sessionID, - msgUserAuthRequest, - req.User, - req.Service, - req.Method, - true, - algo, - pubKey, - } - return Marshal(data) -} - -func appendU16(buf []byte, n uint16) []byte { - return append(buf, byte(n>>8), byte(n)) -} - -func appendU32(buf []byte, n uint32) []byte { - return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendU64(buf []byte, n uint64) []byte { - return append(buf, - byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), - byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendInt(buf []byte, n int) []byte { - return appendU32(buf, uint32(n)) -} - -func appendString(buf []byte, s string) []byte { - buf = appendU32(buf, uint32(len(s))) - buf = append(buf, s...) - return buf -} - -func appendBool(buf []byte, b bool) []byte { - if b { - return append(buf, 1) - } - return append(buf, 0) -} - -// newCond is a helper to hide the fact that there is no usable zero -// value for sync.Cond. -func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) } - -// window represents the buffer available to clients -// wishing to write to a channel. -type window struct { - *sync.Cond - win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1 - writeWaiters int - closed bool -} - -// add adds win to the amount of window available -// for consumers. -func (w *window) add(win uint32) bool { - // a zero sized window adjust is a noop. - if win == 0 { - return true - } - w.L.Lock() - if w.win+win < win { - w.L.Unlock() - return false - } - w.win += win - // It is unusual that multiple goroutines would be attempting to reserve - // window space, but not guaranteed. Use broadcast to notify all waiters - // that additional window is available. - w.Broadcast() - w.L.Unlock() - return true -} - -// close sets the window to closed, so all reservations fail -// immediately. -func (w *window) close() { - w.L.Lock() - w.closed = true - w.Broadcast() - w.L.Unlock() -} - -// reserve reserves win from the available window capacity. -// If no capacity remains, reserve will block. reserve may -// return less than requested. 
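(Editorial aside, not part of the vendor diff: the deleted common.go defines the supported cipher/KEX/MAC lists, findCommon, which intersects the client's and server's offers, and Config.SetDefaults, which fills unset fields from the preferred lists. A hedged sketch of how a caller of this vendored package can pin those algorithm lists through the Config embedded in ClientConfig; the host and credentials are placeholders, and the algorithm names are taken from the lists shown in the deleted file.)

package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ClientConfig{
		User: "testuser",
		Auth: []ssh.AuthMethod{ssh.Password("secret")}, // placeholder credential
		// The embedded Config supplies the algorithm offers for the key exchange;
		// anything left nil is filled in by SetDefaults from the preferred lists.
		Config: ssh.Config{
			Ciphers:      []string{"aes128-gcm@openssh.com", "chacha20-poly1305@openssh.com"},
			MACs:         []string{"hmac-sha2-256-etm@openssh.com"},
			KeyExchanges: []string{"curve25519-sha256@libssh.org"},
		},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // placeholder only; verify host keys in real code
	}

	// If the server offers nothing in one of these lists, findCommon fails and
	// the handshake returns a "no common algorithm" error.
	if _, err := ssh.Dial("tcp", "example.com:22", config); err != nil {
		log.Printf("dial: %v", err)
	}
}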
-func (w *window) reserve(win uint32) (uint32, error) { - var err error - w.L.Lock() - w.writeWaiters++ - w.Broadcast() - for w.win == 0 && !w.closed { - w.Wait() - } - w.writeWaiters-- - if w.win < win { - win = w.win - } - w.win -= win - if w.closed { - err = io.EOF - } - w.L.Unlock() - return win, err -} - -// waitWriterBlocked waits until some goroutine is blocked for further -// writes. It is used in tests only. -func (w *window) waitWriterBlocked() { - w.Cond.L.Lock() - for w.writeWaiters == 0 { - w.Cond.Wait() - } - w.Cond.L.Unlock() -} diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go deleted file mode 100644 index fd6b0681..00000000 --- a/vendor/golang.org/x/crypto/ssh/connection.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "fmt" - "net" -) - -// OpenChannelError is returned if the other side rejects an -// OpenChannel request. -type OpenChannelError struct { - Reason RejectionReason - Message string -} - -func (e *OpenChannelError) Error() string { - return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message) -} - -// ConnMetadata holds metadata for the connection. -type ConnMetadata interface { - // User returns the user ID for this connection. - User() string - - // SessionID returns the session hash, also denoted by H. - SessionID() []byte - - // ClientVersion returns the client's version string as hashed - // into the session ID. - ClientVersion() []byte - - // ServerVersion returns the server's version string as hashed - // into the session ID. - ServerVersion() []byte - - // RemoteAddr returns the remote address for this connection. - RemoteAddr() net.Addr - - // LocalAddr returns the local address for this connection. - LocalAddr() net.Addr -} - -// Conn represents an SSH connection for both server and client roles. -// Conn is the basis for implementing an application layer, such -// as ClientConn, which implements the traditional shell access for -// clients. -type Conn interface { - ConnMetadata - - // SendRequest sends a global request, and returns the - // reply. If wantReply is true, it returns the response status - // and payload. See also RFC4254, section 4. - SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) - - // OpenChannel tries to open an channel. If the request is - // rejected, it returns *OpenChannelError. On success it returns - // the SSH Channel and a Go channel for incoming, out-of-band - // requests. The Go channel must be serviced, or the - // connection will hang. - OpenChannel(name string, data []byte) (Channel, <-chan *Request, error) - - // Close closes the underlying network connection - Close() error - - // Wait blocks until the connection has shut down, and returns the - // error causing the shutdown. - Wait() error - - // TODO(hanwen): consider exposing: - // RequestKeyChange - // Disconnect -} - -// DiscardRequests consumes and rejects all requests from the -// passed-in channel. -func DiscardRequests(in <-chan *Request) { - for req := range in { - if req.WantReply { - req.Reply(false, nil) - } - } -} - -// A connection represents an incoming connection. -type connection struct { - transport *handshakeTransport - sshConn - - // The connection protocol. 
- *mux -} - -func (c *connection) Close() error { - return c.sshConn.conn.Close() -} - -// sshconn provides net.Conn metadata, but disallows direct reads and -// writes. -type sshConn struct { - conn net.Conn - - user string - sessionID []byte - clientVersion []byte - serverVersion []byte -} - -func dup(src []byte) []byte { - dst := make([]byte, len(src)) - copy(dst, src) - return dst -} - -func (c *sshConn) User() string { - return c.user -} - -func (c *sshConn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -func (c *sshConn) Close() error { - return c.conn.Close() -} - -func (c *sshConn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -func (c *sshConn) SessionID() []byte { - return dup(c.sessionID) -} - -func (c *sshConn) ClientVersion() []byte { - return dup(c.clientVersion) -} - -func (c *sshConn) ServerVersion() []byte { - return dup(c.serverVersion) -} diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go deleted file mode 100644 index 67b7322c..00000000 --- a/vendor/golang.org/x/crypto/ssh/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package ssh implements an SSH client and server. - -SSH is a transport security protocol, an authentication protocol and a -family of application protocols. The most typical application level -protocol is a remote shell and this is specifically implemented. However, -the multiplexed nature of SSH is exposed to users that wish to support -others. - -References: - [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD - [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 - -This package does not fall under the stability promise of the Go language itself, -so its API may be changed when pressing needs arise. -*/ -package ssh // import "golang.org/x/crypto/ssh" diff --git a/vendor/golang.org/x/crypto/ssh/example_test.go b/vendor/golang.org/x/crypto/ssh/example_test.go deleted file mode 100644 index b910c7bf..00000000 --- a/vendor/golang.org/x/crypto/ssh/example_test.go +++ /dev/null @@ -1,320 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh_test - -import ( - "bufio" - "bytes" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "os" - "path/filepath" - "strings" - - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/terminal" -) - -func ExampleNewServerConn() { - // Public key authentication is done by comparing - // the public key of a received connection - // with the entries in the authorized_keys file. - authorizedKeysBytes, err := ioutil.ReadFile("authorized_keys") - if err != nil { - log.Fatalf("Failed to load authorized_keys, err: %v", err) - } - - authorizedKeysMap := map[string]bool{} - for len(authorizedKeysBytes) > 0 { - pubKey, _, _, rest, err := ssh.ParseAuthorizedKey(authorizedKeysBytes) - if err != nil { - log.Fatal(err) - } - - authorizedKeysMap[string(pubKey.Marshal())] = true - authorizedKeysBytes = rest - } - - // An SSH server is represented by a ServerConfig, which holds - // certificate details and handles authentication of ServerConns. - config := &ssh.ServerConfig{ - // Remove to disable password auth. 
- PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { - // Should use constant-time compare (or better, salt+hash) in - // a production setting. - if c.User() == "testuser" && string(pass) == "tiger" { - return nil, nil - } - return nil, fmt.Errorf("password rejected for %q", c.User()) - }, - - // Remove to disable public key auth. - PublicKeyCallback: func(c ssh.ConnMetadata, pubKey ssh.PublicKey) (*ssh.Permissions, error) { - if authorizedKeysMap[string(pubKey.Marshal())] { - return &ssh.Permissions{ - // Record the public key used for authentication. - Extensions: map[string]string{ - "pubkey-fp": ssh.FingerprintSHA256(pubKey), - }, - }, nil - } - return nil, fmt.Errorf("unknown public key for %q", c.User()) - }, - } - - privateBytes, err := ioutil.ReadFile("id_rsa") - if err != nil { - log.Fatal("Failed to load private key: ", err) - } - - private, err := ssh.ParsePrivateKey(privateBytes) - if err != nil { - log.Fatal("Failed to parse private key: ", err) - } - - config.AddHostKey(private) - - // Once a ServerConfig has been configured, connections can be - // accepted. - listener, err := net.Listen("tcp", "0.0.0.0:2022") - if err != nil { - log.Fatal("failed to listen for connection: ", err) - } - nConn, err := listener.Accept() - if err != nil { - log.Fatal("failed to accept incoming connection: ", err) - } - - // Before use, a handshake must be performed on the incoming - // net.Conn. - conn, chans, reqs, err := ssh.NewServerConn(nConn, config) - if err != nil { - log.Fatal("failed to handshake: ", err) - } - log.Printf("logged in with key %s", conn.Permissions.Extensions["pubkey-fp"]) - - // The incoming Request channel must be serviced. - go ssh.DiscardRequests(reqs) - - // Service the incoming Channel channel. - for newChannel := range chans { - // Channels have a type, depending on the application level - // protocol intended. In the case of a shell, the type is - // "session" and ServerShell may be used to present a simple - // terminal interface. - if newChannel.ChannelType() != "session" { - newChannel.Reject(ssh.UnknownChannelType, "unknown channel type") - continue - } - channel, requests, err := newChannel.Accept() - if err != nil { - log.Fatalf("Could not accept channel: %v", err) - } - - // Sessions have out-of-band requests such as "shell", - // "pty-req" and "env". Here we handle only the - // "shell" request. - go func(in <-chan *ssh.Request) { - for req := range in { - req.Reply(req.Type == "shell", nil) - } - }(requests) - - term := terminal.NewTerminal(channel, "> ") - - go func() { - defer channel.Close() - for { - line, err := term.ReadLine() - if err != nil { - break - } - fmt.Println(line) - } - }() - } -} - -func ExampleHostKeyCheck() { - // Every client must provide a host key check. 
Here is a - // simple-minded parse of OpenSSH's known_hosts file - host := "hostname" - file, err := os.Open(filepath.Join(os.Getenv("HOME"), ".ssh", "known_hosts")) - if err != nil { - log.Fatal(err) - } - defer file.Close() - - scanner := bufio.NewScanner(file) - var hostKey ssh.PublicKey - for scanner.Scan() { - fields := strings.Split(scanner.Text(), " ") - if len(fields) != 3 { - continue - } - if strings.Contains(fields[0], host) { - var err error - hostKey, _, _, _, err = ssh.ParseAuthorizedKey(scanner.Bytes()) - if err != nil { - log.Fatalf("error parsing %q: %v", fields[2], err) - } - break - } - } - - if hostKey == nil { - log.Fatalf("no hostkey for %s", host) - } - - config := ssh.ClientConfig{ - User: os.Getenv("USER"), - HostKeyCallback: ssh.FixedHostKey(hostKey), - } - - _, err = ssh.Dial("tcp", host+":22", &config) - log.Println(err) -} - -func ExampleDial() { - var hostKey ssh.PublicKey - // An SSH client is represented with a ClientConn. - // - // To authenticate with the remote server you must pass at least one - // implementation of AuthMethod via the Auth field in ClientConfig, - // and provide a HostKeyCallback. - config := &ssh.ClientConfig{ - User: "username", - Auth: []ssh.AuthMethod{ - ssh.Password("yourpassword"), - }, - HostKeyCallback: ssh.FixedHostKey(hostKey), - } - client, err := ssh.Dial("tcp", "yourserver.com:22", config) - if err != nil { - log.Fatal("Failed to dial: ", err) - } - - // Each ClientConn can support multiple interactive sessions, - // represented by a Session. - session, err := client.NewSession() - if err != nil { - log.Fatal("Failed to create session: ", err) - } - defer session.Close() - - // Once a Session is created, you can execute a single command on - // the remote side using the Run method. - var b bytes.Buffer - session.Stdout = &b - if err := session.Run("/usr/bin/whoami"); err != nil { - log.Fatal("Failed to run: " + err.Error()) - } - fmt.Println(b.String()) -} - -func ExamplePublicKeys() { - var hostKey ssh.PublicKey - // A public key may be used to authenticate against the remote - // server by using an unencrypted PEM-encoded private key file. - // - // If you have an encrypted private key, the crypto/x509 package - // can be used to decrypt it. - key, err := ioutil.ReadFile("/home/user/.ssh/id_rsa") - if err != nil { - log.Fatalf("unable to read private key: %v", err) - } - - // Create the Signer for this private key. - signer, err := ssh.ParsePrivateKey(key) - if err != nil { - log.Fatalf("unable to parse private key: %v", err) - } - - config := &ssh.ClientConfig{ - User: "user", - Auth: []ssh.AuthMethod{ - // Use the PublicKeys method for remote authentication. - ssh.PublicKeys(signer), - }, - HostKeyCallback: ssh.FixedHostKey(hostKey), - } - - // Connect to the remote server and perform the SSH handshake. - client, err := ssh.Dial("tcp", "host.com:22", config) - if err != nil { - log.Fatalf("unable to connect: %v", err) - } - defer client.Close() -} - -func ExampleClient_Listen() { - var hostKey ssh.PublicKey - config := &ssh.ClientConfig{ - User: "username", - Auth: []ssh.AuthMethod{ - ssh.Password("password"), - }, - HostKeyCallback: ssh.FixedHostKey(hostKey), - } - // Dial your ssh server. - conn, err := ssh.Dial("tcp", "localhost:22", config) - if err != nil { - log.Fatal("unable to connect: ", err) - } - defer conn.Close() - - // Request the remote side to open port 8080 on all interfaces. 
- l, err := conn.Listen("tcp", "0.0.0.0:8080") - if err != nil { - log.Fatal("unable to register tcp forward: ", err) - } - defer l.Close() - - // Serve HTTP with your SSH server acting as a reverse proxy. - http.Serve(l, http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { - fmt.Fprintf(resp, "Hello world!\n") - })) -} - -func ExampleSession_RequestPty() { - var hostKey ssh.PublicKey - // Create client config - config := &ssh.ClientConfig{ - User: "username", - Auth: []ssh.AuthMethod{ - ssh.Password("password"), - }, - HostKeyCallback: ssh.FixedHostKey(hostKey), - } - // Connect to ssh server - conn, err := ssh.Dial("tcp", "localhost:22", config) - if err != nil { - log.Fatal("unable to connect: ", err) - } - defer conn.Close() - // Create a session - session, err := conn.NewSession() - if err != nil { - log.Fatal("unable to create session: ", err) - } - defer session.Close() - // Set up terminal modes - modes := ssh.TerminalModes{ - ssh.ECHO: 0, // disable echoing - ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud - ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud - } - // Request pseudo terminal - if err := session.RequestPty("xterm", 40, 80, modes); err != nil { - log.Fatal("request for pseudo terminal failed: ", err) - } - // Start remote shell - if err := session.Shell(); err != nil { - log.Fatal("failed to start shell: ", err) - } -} diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go deleted file mode 100644 index 4f7912ec..00000000 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ /dev/null @@ -1,646 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto/rand" - "errors" - "fmt" - "io" - "log" - "net" - "sync" -) - -// debugHandshake, if set, prints messages sent and received. Key -// exchange messages are printed as if DH were used, so the debug -// messages are wrong when using ECDH. -const debugHandshake = false - -// chanSize sets the amount of buffering SSH connections. This is -// primarily for testing: setting chanSize=0 uncovers deadlocks more -// quickly. -const chanSize = 16 - -// keyingTransport is a packet based transport that supports key -// changes. It need not be thread-safe. It should pass through -// msgNewKeys in both directions. -type keyingTransport interface { - packetConn - - // prepareKeyChange sets up a key change. The key change for a - // direction will be effected if a msgNewKeys message is sent - // or received. - prepareKeyChange(*algorithms, *kexResult) error -} - -// handshakeTransport implements rekeying on top of a keyingTransport -// and offers a thread-safe writePacket() interface. -type handshakeTransport struct { - conn keyingTransport - config *Config - - serverVersion []byte - clientVersion []byte - - // hostKeys is non-empty if we are the server. In that case, - // it contains all host keys that can be used to sign the - // connection. - hostKeys []Signer - - // hostKeyAlgorithms is non-empty if we are the client. In that case, - // we accept these key types from the server as host key. - hostKeyAlgorithms []string - - // On read error, incoming is closed, and readError is set. - incoming chan []byte - readError error - - mu sync.Mutex - writeError error - sentInitPacket []byte - sentInitMsg *kexInitMsg - pendingPackets [][]byte // Used when a key exchange is in progress. 
- - // If the read loop wants to schedule a kex, it pings this - // channel, and the write loop will send out a kex - // message. - requestKex chan struct{} - - // If the other side requests or confirms a kex, its kexInit - // packet is sent here for the write loop to find it. - startKex chan *pendingKex - - // data for host key checking - hostKeyCallback HostKeyCallback - dialAddress string - remoteAddr net.Addr - - // bannerCallback is non-empty if we are the client and it has been set in - // ClientConfig. In that case it is called during the user authentication - // dance to handle a custom server's message. - bannerCallback BannerCallback - - // Algorithms agreed in the last key exchange. - algorithms *algorithms - - readPacketsLeft uint32 - readBytesLeft int64 - - writePacketsLeft uint32 - writeBytesLeft int64 - - // The session ID or nil if first kex did not complete yet. - sessionID []byte -} - -type pendingKex struct { - otherInit []byte - done chan error -} - -func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { - t := &handshakeTransport{ - conn: conn, - serverVersion: serverVersion, - clientVersion: clientVersion, - incoming: make(chan []byte, chanSize), - requestKex: make(chan struct{}, 1), - startKex: make(chan *pendingKex, 1), - - config: config, - } - t.resetReadThresholds() - t.resetWriteThresholds() - - // We always start with a mandatory key exchange. - t.requestKex <- struct{}{} - return t -} - -func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.dialAddress = dialAddr - t.remoteAddr = addr - t.hostKeyCallback = config.HostKeyCallback - t.bannerCallback = config.BannerCallback - if config.HostKeyAlgorithms != nil { - t.hostKeyAlgorithms = config.HostKeyAlgorithms - } else { - t.hostKeyAlgorithms = supportedHostKeyAlgos - } - go t.readLoop() - go t.kexLoop() - return t -} - -func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.hostKeys = config.hostKeys - go t.readLoop() - go t.kexLoop() - return t -} - -func (t *handshakeTransport) getSessionID() []byte { - return t.sessionID -} - -// waitSession waits for the session to be established. This should be -// the first thing to call after instantiating handshakeTransport. 
-func (t *handshakeTransport) waitSession() error { - p, err := t.readPacket() - if err != nil { - return err - } - if p[0] != msgNewKeys { - return fmt.Errorf("ssh: first packet should be msgNewKeys") - } - - return nil -} - -func (t *handshakeTransport) id() string { - if len(t.hostKeys) > 0 { - return "server" - } - return "client" -} - -func (t *handshakeTransport) printPacket(p []byte, write bool) { - action := "got" - if write { - action = "sent" - } - - if p[0] == msgChannelData || p[0] == msgChannelExtendedData { - log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p)) - } else { - msg, err := decode(p) - log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err) - } -} - -func (t *handshakeTransport) readPacket() ([]byte, error) { - p, ok := <-t.incoming - if !ok { - return nil, t.readError - } - return p, nil -} - -func (t *handshakeTransport) readLoop() { - first := true - for { - p, err := t.readOnePacket(first) - first = false - if err != nil { - t.readError = err - close(t.incoming) - break - } - if p[0] == msgIgnore || p[0] == msgDebug { - continue - } - t.incoming <- p - } - - // Stop writers too. - t.recordWriteError(t.readError) - - // Unblock the writer should it wait for this. - close(t.startKex) - - // Don't close t.requestKex; it's also written to from writePacket. -} - -func (t *handshakeTransport) pushPacket(p []byte) error { - if debugHandshake { - t.printPacket(p, true) - } - return t.conn.writePacket(p) -} - -func (t *handshakeTransport) getWriteError() error { - t.mu.Lock() - defer t.mu.Unlock() - return t.writeError -} - -func (t *handshakeTransport) recordWriteError(err error) { - t.mu.Lock() - defer t.mu.Unlock() - if t.writeError == nil && err != nil { - t.writeError = err - } -} - -func (t *handshakeTransport) requestKeyExchange() { - select { - case t.requestKex <- struct{}{}: - default: - // something already requested a kex, so do nothing. - } -} - -func (t *handshakeTransport) resetWriteThresholds() { - t.writePacketsLeft = packetRekeyThreshold - if t.config.RekeyThreshold > 0 { - t.writeBytesLeft = int64(t.config.RekeyThreshold) - } else if t.algorithms != nil { - t.writeBytesLeft = t.algorithms.w.rekeyBytes() - } else { - t.writeBytesLeft = 1 << 30 - } -} - -func (t *handshakeTransport) kexLoop() { - -write: - for t.getWriteError() == nil { - var request *pendingKex - var sent bool - - for request == nil || !sent { - var ok bool - select { - case request, ok = <-t.startKex: - if !ok { - break write - } - case <-t.requestKex: - break - } - - if !sent { - if err := t.sendKexInit(); err != nil { - t.recordWriteError(err) - break - } - sent = true - } - } - - if err := t.getWriteError(); err != nil { - if request != nil { - request.done <- err - } - break - } - - // We're not servicing t.requestKex, but that is OK: - // we never block on sending to t.requestKex. - - // We're not servicing t.startKex, but the remote end - // has just sent us a kexInitMsg, so it can't send - // another key change request, until we close the done - // channel on the pendingKex request. - - err := t.enterKeyExchange(request.otherInit) - - t.mu.Lock() - t.writeError = err - t.sentInitPacket = nil - t.sentInitMsg = nil - - t.resetWriteThresholds() - - // we have completed the key exchange. Since the - // reader is still blocked, it is safe to clear out - // the requestKex channel. 
This avoids the situation - // where: 1) we consumed our own request for the - // initial kex, and 2) the kex from the remote side - // caused another send on the requestKex channel, - clear: - for { - select { - case <-t.requestKex: - // - default: - break clear - } - } - - request.done <- t.writeError - - // kex finished. Push packets that we received while - // the kex was in progress. Don't look at t.startKex - // and don't increment writtenSinceKex: if we trigger - // another kex while we are still busy with the last - // one, things will become very confusing. - for _, p := range t.pendingPackets { - t.writeError = t.pushPacket(p) - if t.writeError != nil { - break - } - } - t.pendingPackets = t.pendingPackets[:0] - t.mu.Unlock() - } - - // drain startKex channel. We don't service t.requestKex - // because nobody does blocking sends there. - go func() { - for init := range t.startKex { - init.done <- t.writeError - } - }() - - // Unblock reader. - t.conn.Close() -} - -// The protocol uses uint32 for packet counters, so we can't let them -// reach 1<<32. We will actually read and write more packets than -// this, though: the other side may send more packets, and after we -// hit this limit on writing we will send a few more packets for the -// key exchange itself. -const packetRekeyThreshold = (1 << 31) - -func (t *handshakeTransport) resetReadThresholds() { - t.readPacketsLeft = packetRekeyThreshold - if t.config.RekeyThreshold > 0 { - t.readBytesLeft = int64(t.config.RekeyThreshold) - } else if t.algorithms != nil { - t.readBytesLeft = t.algorithms.r.rekeyBytes() - } else { - t.readBytesLeft = 1 << 30 - } -} - -func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { - p, err := t.conn.readPacket() - if err != nil { - return nil, err - } - - if t.readPacketsLeft > 0 { - t.readPacketsLeft-- - } else { - t.requestKeyExchange() - } - - if t.readBytesLeft > 0 { - t.readBytesLeft -= int64(len(p)) - } else { - t.requestKeyExchange() - } - - if debugHandshake { - t.printPacket(p, false) - } - - if first && p[0] != msgKexInit { - return nil, fmt.Errorf("ssh: first packet should be msgKexInit") - } - - if p[0] != msgKexInit { - return p, nil - } - - firstKex := t.sessionID == nil - - kex := pendingKex{ - done: make(chan error, 1), - otherInit: p, - } - t.startKex <- &kex - err = <-kex.done - - if debugHandshake { - log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err) - } - - if err != nil { - return nil, err - } - - t.resetReadThresholds() - - // By default, a key exchange is hidden from higher layers by - // translating it into msgIgnore. - successPacket := []byte{msgIgnore} - if firstKex { - // sendKexInit() for the first kex waits for - // msgNewKeys so the authentication process is - // guaranteed to happen over an encrypted transport. - successPacket = []byte{msgNewKeys} - } - - return successPacket, nil -} - -// sendKexInit sends a key change message. -func (t *handshakeTransport) sendKexInit() error { - t.mu.Lock() - defer t.mu.Unlock() - if t.sentInitMsg != nil { - // kexInits may be sent either in response to the other side, - // or because our side wants to initiate a key change, so we - // may have already sent a kexInit. In that case, don't send a - // second kexInit. 
- return nil - } - - msg := &kexInitMsg{ - KexAlgos: t.config.KeyExchanges, - CiphersClientServer: t.config.Ciphers, - CiphersServerClient: t.config.Ciphers, - MACsClientServer: t.config.MACs, - MACsServerClient: t.config.MACs, - CompressionClientServer: supportedCompressions, - CompressionServerClient: supportedCompressions, - } - io.ReadFull(rand.Reader, msg.Cookie[:]) - - if len(t.hostKeys) > 0 { - for _, k := range t.hostKeys { - msg.ServerHostKeyAlgos = append( - msg.ServerHostKeyAlgos, k.PublicKey().Type()) - } - } else { - msg.ServerHostKeyAlgos = t.hostKeyAlgorithms - } - packet := Marshal(msg) - - // writePacket destroys the contents, so save a copy. - packetCopy := make([]byte, len(packet)) - copy(packetCopy, packet) - - if err := t.pushPacket(packetCopy); err != nil { - return err - } - - t.sentInitMsg = msg - t.sentInitPacket = packet - - return nil -} - -func (t *handshakeTransport) writePacket(p []byte) error { - switch p[0] { - case msgKexInit: - return errors.New("ssh: only handshakeTransport can send kexInit") - case msgNewKeys: - return errors.New("ssh: only handshakeTransport can send newKeys") - } - - t.mu.Lock() - defer t.mu.Unlock() - if t.writeError != nil { - return t.writeError - } - - if t.sentInitMsg != nil { - // Copy the packet so the writer can reuse the buffer. - cp := make([]byte, len(p)) - copy(cp, p) - t.pendingPackets = append(t.pendingPackets, cp) - return nil - } - - if t.writeBytesLeft > 0 { - t.writeBytesLeft -= int64(len(p)) - } else { - t.requestKeyExchange() - } - - if t.writePacketsLeft > 0 { - t.writePacketsLeft-- - } else { - t.requestKeyExchange() - } - - if err := t.pushPacket(p); err != nil { - t.writeError = err - } - - return nil -} - -func (t *handshakeTransport) Close() error { - return t.conn.Close() -} - -func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { - if debugHandshake { - log.Printf("%s entered key exchange", t.id()) - } - - otherInit := &kexInitMsg{} - if err := Unmarshal(otherInitPacket, otherInit); err != nil { - return err - } - - magics := handshakeMagics{ - clientVersion: t.clientVersion, - serverVersion: t.serverVersion, - clientKexInit: otherInitPacket, - serverKexInit: t.sentInitPacket, - } - - clientInit := otherInit - serverInit := t.sentInitMsg - if len(t.hostKeys) == 0 { - clientInit, serverInit = serverInit, clientInit - - magics.clientKexInit = t.sentInitPacket - magics.serverKexInit = otherInitPacket - } - - var err error - t.algorithms, err = findAgreedAlgorithms(clientInit, serverInit) - if err != nil { - return err - } - - // We don't send FirstKexFollows, but we handle receiving it. - // - // RFC 4253 section 7 defines the kex and the agreement method for - // first_kex_packet_follows. It states that the guessed packet - // should be ignored if the "kex algorithm and/or the host - // key algorithm is guessed wrong (server and client have - // different preferred algorithm), or if any of the other - // algorithms cannot be agreed upon". The other algorithms have - // already been checked above so the kex algorithm and host key - // algorithm are checked here. - if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) { - // other side sent a kex message for the wrong algorithm, - // which we have to ignore. 
- if _, err := t.conn.readPacket(); err != nil { - return err - } - } - - kex, ok := kexAlgoMap[t.algorithms.kex] - if !ok { - return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex) - } - - var result *kexResult - if len(t.hostKeys) > 0 { - result, err = t.server(kex, t.algorithms, &magics) - } else { - result, err = t.client(kex, t.algorithms, &magics) - } - - if err != nil { - return err - } - - if t.sessionID == nil { - t.sessionID = result.H - } - result.SessionID = t.sessionID - - if err := t.conn.prepareKeyChange(t.algorithms, result); err != nil { - return err - } - if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { - return err - } - if packet, err := t.conn.readPacket(); err != nil { - return err - } else if packet[0] != msgNewKeys { - return unexpectedMessageError(msgNewKeys, packet[0]) - } - - return nil -} - -func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { - var hostKey Signer - for _, k := range t.hostKeys { - if algs.hostKey == k.PublicKey().Type() { - hostKey = k - } - } - - r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey) - return r, err -} - -func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { - result, err := kex.Client(t.conn, t.config.Rand, magics) - if err != nil { - return nil, err - } - - hostKey, err := ParsePublicKey(result.HostKey) - if err != nil { - return nil, err - } - - if err := verifyHostKeySignature(hostKey, result); err != nil { - return nil, err - } - - err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey) - if err != nil { - return nil, err - } - - return result, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/handshake_test.go b/vendor/golang.org/x/crypto/ssh/handshake_test.go deleted file mode 100644 index 91d49356..00000000 --- a/vendor/golang.org/x/crypto/ssh/handshake_test.go +++ /dev/null @@ -1,559 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "crypto/rand" - "errors" - "fmt" - "io" - "net" - "reflect" - "runtime" - "strings" - "sync" - "testing" -) - -type testChecker struct { - calls []string -} - -func (t *testChecker) Check(dialAddr string, addr net.Addr, key PublicKey) error { - if dialAddr == "bad" { - return fmt.Errorf("dialAddr is bad") - } - - if tcpAddr, ok := addr.(*net.TCPAddr); !ok || tcpAddr == nil { - return fmt.Errorf("testChecker: got %T want *net.TCPAddr", addr) - } - - t.calls = append(t.calls, fmt.Sprintf("%s %v %s %x", dialAddr, addr, key.Type(), key.Marshal())) - - return nil -} - -// netPipe is analogous to net.Pipe, but it uses a real net.Conn, and -// therefore is buffered (net.Pipe deadlocks if both sides start with -// a write.) -func netPipe() (net.Conn, net.Conn, error) { - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - listener, err = net.Listen("tcp", "[::1]:0") - if err != nil { - return nil, nil, err - } - } - defer listener.Close() - c1, err := net.Dial("tcp", listener.Addr().String()) - if err != nil { - return nil, nil, err - } - - c2, err := listener.Accept() - if err != nil { - c1.Close() - return nil, nil, err - } - - return c1, c2, nil -} - -// noiseTransport inserts ignore messages to check that the read loop -// and the key exchange filters out these messages. 
-type noiseTransport struct { - keyingTransport -} - -func (t *noiseTransport) writePacket(p []byte) error { - ignore := []byte{msgIgnore} - if err := t.keyingTransport.writePacket(ignore); err != nil { - return err - } - debug := []byte{msgDebug, 1, 2, 3} - if err := t.keyingTransport.writePacket(debug); err != nil { - return err - } - - return t.keyingTransport.writePacket(p) -} - -func addNoiseTransport(t keyingTransport) keyingTransport { - return &noiseTransport{t} -} - -// handshakePair creates two handshakeTransports connected with each -// other. If the noise argument is true, both transports will try to -// confuse the other side by sending ignore and debug messages. -func handshakePair(clientConf *ClientConfig, addr string, noise bool) (client *handshakeTransport, server *handshakeTransport, err error) { - a, b, err := netPipe() - if err != nil { - return nil, nil, err - } - - var trC, trS keyingTransport - - trC = newTransport(a, rand.Reader, true) - trS = newTransport(b, rand.Reader, false) - if noise { - trC = addNoiseTransport(trC) - trS = addNoiseTransport(trS) - } - clientConf.SetDefaults() - - v := []byte("version") - client = newClientTransport(trC, v, v, clientConf, addr, a.RemoteAddr()) - - serverConf := &ServerConfig{} - serverConf.AddHostKey(testSigners["ecdsa"]) - serverConf.AddHostKey(testSigners["rsa"]) - serverConf.SetDefaults() - server = newServerTransport(trS, v, v, serverConf) - - if err := server.waitSession(); err != nil { - return nil, nil, fmt.Errorf("server.waitSession: %v", err) - } - if err := client.waitSession(); err != nil { - return nil, nil, fmt.Errorf("client.waitSession: %v", err) - } - - return client, server, nil -} - -func TestHandshakeBasic(t *testing.T) { - if runtime.GOOS == "plan9" { - t.Skip("see golang.org/issue/7237") - } - - checker := &syncChecker{ - waitCall: make(chan int, 10), - called: make(chan int, 10), - } - - checker.waitCall <- 1 - trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr", false) - if err != nil { - t.Fatalf("handshakePair: %v", err) - } - - defer trC.Close() - defer trS.Close() - - // Let first kex complete normally. - <-checker.called - - clientDone := make(chan int, 0) - gotHalf := make(chan int, 0) - const N = 20 - - go func() { - defer close(clientDone) - // Client writes a bunch of stuff, and does a key - // change in the middle. This should not confuse the - // handshake in progress. We do this twice, so we test - // that the packet buffer is reset correctly. - for i := 0; i < N; i++ { - p := []byte{msgRequestSuccess, byte(i)} - if err := trC.writePacket(p); err != nil { - t.Fatalf("sendPacket: %v", err) - } - if (i % 10) == 5 { - <-gotHalf - // halfway through, we request a key change. - trC.requestKeyExchange() - - // Wait until we can be sure the key - // change has really started before we - // write more. - <-checker.called - } - if (i % 10) == 7 { - // write some packets until the kex - // completes, to test buffering of - // packets. 
- checker.waitCall <- 1 - } - } - }() - - // Server checks that client messages come in cleanly - i := 0 - err = nil - for ; i < N; i++ { - var p []byte - p, err = trS.readPacket() - if err != nil { - break - } - if (i % 10) == 5 { - gotHalf <- 1 - } - - want := []byte{msgRequestSuccess, byte(i)} - if bytes.Compare(p, want) != 0 { - t.Errorf("message %d: got %v, want %v", i, p, want) - } - } - <-clientDone - if err != nil && err != io.EOF { - t.Fatalf("server error: %v", err) - } - if i != N { - t.Errorf("received %d messages, want 10.", i) - } - - close(checker.called) - if _, ok := <-checker.called; ok { - // If all went well, we registered exactly 2 key changes: one - // that establishes the session, and one that we requested - // additionally. - t.Fatalf("got another host key checks after 2 handshakes") - } -} - -func TestForceFirstKex(t *testing.T) { - // like handshakePair, but must access the keyingTransport. - checker := &testChecker{} - clientConf := &ClientConfig{HostKeyCallback: checker.Check} - a, b, err := netPipe() - if err != nil { - t.Fatalf("netPipe: %v", err) - } - - var trC, trS keyingTransport - - trC = newTransport(a, rand.Reader, true) - - // This is the disallowed packet: - trC.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})) - - // Rest of the setup. - trS = newTransport(b, rand.Reader, false) - clientConf.SetDefaults() - - v := []byte("version") - client := newClientTransport(trC, v, v, clientConf, "addr", a.RemoteAddr()) - - serverConf := &ServerConfig{} - serverConf.AddHostKey(testSigners["ecdsa"]) - serverConf.AddHostKey(testSigners["rsa"]) - serverConf.SetDefaults() - server := newServerTransport(trS, v, v, serverConf) - - defer client.Close() - defer server.Close() - - // We setup the initial key exchange, but the remote side - // tries to send serviceRequestMsg in cleartext, which is - // disallowed. - - if err := server.waitSession(); err == nil { - t.Errorf("server first kex init should reject unexpected packet") - } -} - -func TestHandshakeAutoRekeyWrite(t *testing.T) { - checker := &syncChecker{ - called: make(chan int, 10), - waitCall: nil, - } - clientConf := &ClientConfig{HostKeyCallback: checker.Check} - clientConf.RekeyThreshold = 500 - trC, trS, err := handshakePair(clientConf, "addr", false) - if err != nil { - t.Fatalf("handshakePair: %v", err) - } - defer trC.Close() - defer trS.Close() - - input := make([]byte, 251) - input[0] = msgRequestSuccess - - done := make(chan int, 1) - const numPacket = 5 - go func() { - defer close(done) - j := 0 - for ; j < numPacket; j++ { - if p, err := trS.readPacket(); err != nil { - break - } else if !bytes.Equal(input, p) { - t.Errorf("got packet type %d, want %d", p[0], input[0]) - } - } - - if j != numPacket { - t.Errorf("got %d, want 5 messages", j) - } - }() - - <-checker.called - - for i := 0; i < numPacket; i++ { - p := make([]byte, len(input)) - copy(p, input) - if err := trC.writePacket(p); err != nil { - t.Errorf("writePacket: %v", err) - } - if i == 2 { - // Make sure the kex is in progress. 
- <-checker.called - } - - } - <-done -} - -type syncChecker struct { - waitCall chan int - called chan int -} - -func (c *syncChecker) Check(dialAddr string, addr net.Addr, key PublicKey) error { - c.called <- 1 - if c.waitCall != nil { - <-c.waitCall - } - return nil -} - -func TestHandshakeAutoRekeyRead(t *testing.T) { - sync := &syncChecker{ - called: make(chan int, 2), - waitCall: nil, - } - clientConf := &ClientConfig{ - HostKeyCallback: sync.Check, - } - clientConf.RekeyThreshold = 500 - - trC, trS, err := handshakePair(clientConf, "addr", false) - if err != nil { - t.Fatalf("handshakePair: %v", err) - } - defer trC.Close() - defer trS.Close() - - packet := make([]byte, 501) - packet[0] = msgRequestSuccess - if err := trS.writePacket(packet); err != nil { - t.Fatalf("writePacket: %v", err) - } - - // While we read out the packet, a key change will be - // initiated. - done := make(chan int, 1) - go func() { - defer close(done) - if _, err := trC.readPacket(); err != nil { - t.Fatalf("readPacket(client): %v", err) - } - - }() - - <-done - <-sync.called -} - -// errorKeyingTransport generates errors after a given number of -// read/write operations. -type errorKeyingTransport struct { - packetConn - readLeft, writeLeft int -} - -func (n *errorKeyingTransport) prepareKeyChange(*algorithms, *kexResult) error { - return nil -} - -func (n *errorKeyingTransport) getSessionID() []byte { - return nil -} - -func (n *errorKeyingTransport) writePacket(packet []byte) error { - if n.writeLeft == 0 { - n.Close() - return errors.New("barf") - } - - n.writeLeft-- - return n.packetConn.writePacket(packet) -} - -func (n *errorKeyingTransport) readPacket() ([]byte, error) { - if n.readLeft == 0 { - n.Close() - return nil, errors.New("barf") - } - - n.readLeft-- - return n.packetConn.readPacket() -} - -func TestHandshakeErrorHandlingRead(t *testing.T) { - for i := 0; i < 20; i++ { - testHandshakeErrorHandlingN(t, i, -1, false) - } -} - -func TestHandshakeErrorHandlingWrite(t *testing.T) { - for i := 0; i < 20; i++ { - testHandshakeErrorHandlingN(t, -1, i, false) - } -} - -func TestHandshakeErrorHandlingReadCoupled(t *testing.T) { - for i := 0; i < 20; i++ { - testHandshakeErrorHandlingN(t, i, -1, true) - } -} - -func TestHandshakeErrorHandlingWriteCoupled(t *testing.T) { - for i := 0; i < 20; i++ { - testHandshakeErrorHandlingN(t, -1, i, true) - } -} - -// testHandshakeErrorHandlingN runs handshakes, injecting errors. If -// handshakeTransport deadlocks, the go runtime will detect it and -// panic. 
-func testHandshakeErrorHandlingN(t *testing.T, readLimit, writeLimit int, coupled bool) { - msg := Marshal(&serviceRequestMsg{strings.Repeat("x", int(minRekeyThreshold)/4)}) - - a, b := memPipe() - defer a.Close() - defer b.Close() - - key := testSigners["ecdsa"] - serverConf := Config{RekeyThreshold: minRekeyThreshold} - serverConf.SetDefaults() - serverConn := newHandshakeTransport(&errorKeyingTransport{a, readLimit, writeLimit}, &serverConf, []byte{'a'}, []byte{'b'}) - serverConn.hostKeys = []Signer{key} - go serverConn.readLoop() - go serverConn.kexLoop() - - clientConf := Config{RekeyThreshold: 10 * minRekeyThreshold} - clientConf.SetDefaults() - clientConn := newHandshakeTransport(&errorKeyingTransport{b, -1, -1}, &clientConf, []byte{'a'}, []byte{'b'}) - clientConn.hostKeyAlgorithms = []string{key.PublicKey().Type()} - clientConn.hostKeyCallback = InsecureIgnoreHostKey() - go clientConn.readLoop() - go clientConn.kexLoop() - - var wg sync.WaitGroup - - for _, hs := range []packetConn{serverConn, clientConn} { - if !coupled { - wg.Add(2) - go func(c packetConn) { - for i := 0; ; i++ { - str := fmt.Sprintf("%08x", i) + strings.Repeat("x", int(minRekeyThreshold)/4-8) - err := c.writePacket(Marshal(&serviceRequestMsg{str})) - if err != nil { - break - } - } - wg.Done() - c.Close() - }(hs) - go func(c packetConn) { - for { - _, err := c.readPacket() - if err != nil { - break - } - } - wg.Done() - }(hs) - } else { - wg.Add(1) - go func(c packetConn) { - for { - _, err := c.readPacket() - if err != nil { - break - } - if err := c.writePacket(msg); err != nil { - break - } - - } - wg.Done() - }(hs) - } - } - wg.Wait() -} - -func TestDisconnect(t *testing.T) { - if runtime.GOOS == "plan9" { - t.Skip("see golang.org/issue/7237") - } - checker := &testChecker{} - trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr", false) - if err != nil { - t.Fatalf("handshakePair: %v", err) - } - - defer trC.Close() - defer trS.Close() - - trC.writePacket([]byte{msgRequestSuccess, 0, 0}) - errMsg := &disconnectMsg{ - Reason: 42, - Message: "such is life", - } - trC.writePacket(Marshal(errMsg)) - trC.writePacket([]byte{msgRequestSuccess, 0, 0}) - - packet, err := trS.readPacket() - if err != nil { - t.Fatalf("readPacket 1: %v", err) - } - if packet[0] != msgRequestSuccess { - t.Errorf("got packet %v, want packet type %d", packet, msgRequestSuccess) - } - - _, err = trS.readPacket() - if err == nil { - t.Errorf("readPacket 2 succeeded") - } else if !reflect.DeepEqual(err, errMsg) { - t.Errorf("got error %#v, want %#v", err, errMsg) - } - - _, err = trS.readPacket() - if err == nil { - t.Errorf("readPacket 3 succeeded") - } -} - -func TestHandshakeRekeyDefault(t *testing.T) { - clientConf := &ClientConfig{ - Config: Config{ - Ciphers: []string{"aes128-ctr"}, - }, - HostKeyCallback: InsecureIgnoreHostKey(), - } - trC, trS, err := handshakePair(clientConf, "addr", false) - if err != nil { - t.Fatalf("handshakePair: %v", err) - } - defer trC.Close() - defer trS.Close() - - trC.writePacket([]byte{msgRequestSuccess, 0, 0}) - trC.Close() - - rgb := (1024 + trC.readBytesLeft) >> 30 - wgb := (1024 + trC.writeBytesLeft) >> 30 - - if rgb != 64 { - t.Errorf("got rekey after %dG read, want 64G", rgb) - } - if wgb != 64 { - t.Errorf("got rekey after %dG write, want 64G", wgb) - } -} diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go deleted file mode 100644 index f34bcc01..00000000 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ /dev/null @@ -1,540 
+0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/subtle" - "errors" - "io" - "math/big" - - "golang.org/x/crypto/curve25519" -) - -const ( - kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" - kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" - kexAlgoECDH256 = "ecdh-sha2-nistp256" - kexAlgoECDH384 = "ecdh-sha2-nistp384" - kexAlgoECDH521 = "ecdh-sha2-nistp521" - kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org" -) - -// kexResult captures the outcome of a key exchange. -type kexResult struct { - // Session hash. See also RFC 4253, section 8. - H []byte - - // Shared secret. See also RFC 4253, section 8. - K []byte - - // Host key as hashed into H. - HostKey []byte - - // Signature of H. - Signature []byte - - // A cryptographic hash function that matches the security - // level of the key exchange algorithm. It is used for - // calculating H, and for deriving keys from H and K. - Hash crypto.Hash - - // The session ID, which is the first H computed. This is used - // to derive key material inside the transport. - SessionID []byte -} - -// handshakeMagics contains data that is always included in the -// session hash. -type handshakeMagics struct { - clientVersion, serverVersion []byte - clientKexInit, serverKexInit []byte -} - -func (m *handshakeMagics) write(w io.Writer) { - writeString(w, m.clientVersion) - writeString(w, m.serverVersion) - writeString(w, m.clientKexInit) - writeString(w, m.serverKexInit) -} - -// kexAlgorithm abstracts different key exchange algorithms. -type kexAlgorithm interface { - // Server runs server-side key agreement, signing the result - // with a hostkey. - Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error) - - // Client runs the client-side key agreement. Caller is - // responsible for verifying the host key signature. - Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) -} - -// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. 
-type dhGroup struct { - g, p, pMinus1 *big.Int -} - -func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { - if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 { - return nil, errors.New("ssh: DH parameter out of bounds") - } - return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil -} - -func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { - hashFunc := crypto.SHA1 - - var x *big.Int - for { - var err error - if x, err = rand.Int(randSource, group.pMinus1); err != nil { - return nil, err - } - if x.Sign() > 0 { - break - } - } - - X := new(big.Int).Exp(group.g, x, group.p) - kexDHInit := kexDHInitMsg{ - X: X, - } - if err := c.writePacket(Marshal(&kexDHInit)); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexDHReply kexDHReplyMsg - if err = Unmarshal(packet, &kexDHReply); err != nil { - return nil, err - } - - ki, err := group.diffieHellman(kexDHReply.Y, x) - if err != nil { - return nil, err - } - - h := hashFunc.New() - magics.write(h) - writeString(h, kexDHReply.HostKey) - writeInt(h, X) - writeInt(h, kexDHReply.Y) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: kexDHReply.HostKey, - Signature: kexDHReply.Signature, - Hash: crypto.SHA1, - }, nil -} - -func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - hashFunc := crypto.SHA1 - packet, err := c.readPacket() - if err != nil { - return - } - var kexDHInit kexDHInitMsg - if err = Unmarshal(packet, &kexDHInit); err != nil { - return - } - - var y *big.Int - for { - if y, err = rand.Int(randSource, group.pMinus1); err != nil { - return - } - if y.Sign() > 0 { - break - } - } - - Y := new(big.Int).Exp(group.g, y, group.p) - ki, err := group.diffieHellman(kexDHInit.X, y) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := hashFunc.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeInt(h, kexDHInit.X) - writeInt(h, Y) - - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, randSource, H) - if err != nil { - return nil, err - } - - kexDHReply := kexDHReplyMsg{ - HostKey: hostKeyBytes, - Y: Y, - Signature: sig, - } - packet = Marshal(&kexDHReply) - - err = c.writePacket(packet) - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: crypto.SHA1, - }, nil -} - -// ecdh performs Elliptic Curve Diffie-Hellman key exchange as -// described in RFC 5656, section 4. 
-type ecdh struct { - curve elliptic.Curve -} - -func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - kexInit := kexECDHInitMsg{ - ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y), - } - - serialized := Marshal(&kexInit) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - - x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey) - if err != nil { - return nil, err - } - - // generate shared secret - secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kexInit.ClientPubKey) - writeString(h, reply.EphemeralPubKey) - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: ecHash(kex.curve), - }, nil -} - -// unmarshalECKey parses and checks an EC key. -func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) { - x, y = elliptic.Unmarshal(curve, pubkey) - if x == nil { - return nil, nil, errors.New("ssh: elliptic.Unmarshal failure") - } - if !validateECPublicKey(curve, x, y) { - return nil, nil, errors.New("ssh: public key not on curve") - } - return x, y, nil -} - -// validateECPublicKey checks that the point is a valid public key for -// the given curve. See [SEC1], 3.2.2 -func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { - if x.Sign() == 0 && y.Sign() == 0 { - return false - } - - if x.Cmp(curve.Params().P) >= 0 { - return false - } - - if y.Cmp(curve.Params().P) >= 0 { - return false - } - - if !curve.IsOnCurve(x, y) { - return false - } - - // We don't check if N * PubKey == 0, since - // - // - the NIST curves have cofactor = 1, so this is implicit. - // (We don't foresee an implementation that supports non NIST - // curves) - // - // - for ephemeral keys, we don't need to worry about small - // subgroup attacks. - return true -} - -func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexECDHInit kexECDHInitMsg - if err = Unmarshal(packet, &kexECDHInit); err != nil { - return nil, err - } - - clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey) - if err != nil { - return nil, err - } - - // We could cache this key across multiple users/multiple - // connection attempts, but the benefit is small. OpenSSH - // generates a new key for each incoming connection. 
- ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y) - - // generate shared secret - secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexECDHInit.ClientPubKey) - writeString(h, serializedEphKey) - - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, rand, H) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: serializedEphKey, - HostKey: hostKeyBytes, - Signature: sig, - } - - serialized := Marshal(&reply) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - return &kexResult{ - H: H, - K: K, - HostKey: reply.HostKey, - Signature: sig, - Hash: ecHash(kex.curve), - }, nil -} - -var kexAlgoMap = map[string]kexAlgorithm{} - -func init() { - // This is the group called diffie-hellman-group1-sha1 in RFC - // 4253 and Oakley Group 2 in RFC 2409. - p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) - kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - } - - // This is the group called diffie-hellman-group14-sha1 in RFC - // 4253 and Oakley Group 14 in RFC 3526. - p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - - kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - } - - kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} - kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} - kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} - kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} -} - -// curve25519sha256 implements the curve25519-sha256@libssh.org key -// agreement protocol, as described in -// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt -type curve25519sha256 struct{} - -type curve25519KeyPair struct { - priv [32]byte - pub [32]byte -} - -func (kp *curve25519KeyPair) generate(rand io.Reader) error { - if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { - return err - } - curve25519.ScalarBaseMult(&kp.pub, &kp.priv) - return nil -} - -// curve25519Zeros is just an array of 32 zero bytes so that we have something -// convenient to compare against in order to reject curve25519 points with the -// wrong order. 
-var curve25519Zeros [32]byte - -func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - if len(reply.EphemeralPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var servPub, secret [32]byte - copy(servPub[:], reply.EphemeralPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &servPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kp.pub[:]) - writeString(h, reply.EphemeralPubKey) - - ki := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: crypto.SHA256, - }, nil -} - -func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return - } - var kexInit kexECDHInitMsg - if err = Unmarshal(packet, &kexInit); err != nil { - return - } - - if len(kexInit.ClientPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - - var clientPub, secret [32]byte - copy(clientPub[:], kexInit.ClientPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &clientPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexInit.ClientPubKey) - writeString(h, kp.pub[:]) - - ki := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - H := h.Sum(nil) - - sig, err := signAndMarshal(priv, rand, H) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: kp.pub[:], - HostKey: hostKeyBytes, - Signature: sig, - } - if err := c.writePacket(Marshal(&reply)); err != nil { - return nil, err - } - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: crypto.SHA256, - }, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/kex_test.go b/vendor/golang.org/x/crypto/ssh/kex_test.go deleted file mode 100644 index 12ca0acd..00000000 --- a/vendor/golang.org/x/crypto/ssh/kex_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -// Key exchange tests. 
- -import ( - "crypto/rand" - "reflect" - "testing" -) - -func TestKexes(t *testing.T) { - type kexResultErr struct { - result *kexResult - err error - } - - for name, kex := range kexAlgoMap { - a, b := memPipe() - - s := make(chan kexResultErr, 1) - c := make(chan kexResultErr, 1) - var magics handshakeMagics - go func() { - r, e := kex.Client(a, rand.Reader, &magics) - a.Close() - c <- kexResultErr{r, e} - }() - go func() { - r, e := kex.Server(b, rand.Reader, &magics, testSigners["ecdsa"]) - b.Close() - s <- kexResultErr{r, e} - }() - - clientRes := <-c - serverRes := <-s - if clientRes.err != nil { - t.Errorf("client: %v", clientRes.err) - } - if serverRes.err != nil { - t.Errorf("server: %v", serverRes.err) - } - if !reflect.DeepEqual(clientRes.result, serverRes.result) { - t.Errorf("kex %q: mismatch %#v, %#v", name, clientRes.result, serverRes.result) - } - } -} diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go deleted file mode 100644 index 73697ded..00000000 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ /dev/null @@ -1,1032 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/md5" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/asn1" - "encoding/base64" - "encoding/hex" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" - "strings" - - "golang.org/x/crypto/ed25519" -) - -// These constants represent the algorithm names for key types supported by this -// package. -const ( - KeyAlgoRSA = "ssh-rsa" - KeyAlgoDSA = "ssh-dss" - KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" - KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" - KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" - KeyAlgoED25519 = "ssh-ed25519" -) - -// parsePubKey parses a public key of the given algorithm. -// Use ParsePublicKey for keys with prepended algorithm. -func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) { - switch algo { - case KeyAlgoRSA: - return parseRSA(in) - case KeyAlgoDSA: - return parseDSA(in) - case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: - return parseECDSA(in) - case KeyAlgoED25519: - return parseED25519(in) - case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: - cert, err := parseCert(in, certToPrivAlgo(algo)) - if err != nil { - return nil, nil, err - } - return cert, nil, nil - } - return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) -} - -// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format -// (see sshd(8) manual page) once the options and key type fields have been -// removed. -func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) { - in = bytes.TrimSpace(in) - - i := bytes.IndexAny(in, " \t") - if i == -1 { - i = len(in) - } - base64Key := in[:i] - - key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key))) - n, err := base64.StdEncoding.Decode(key, base64Key) - if err != nil { - return nil, "", err - } - key = key[:n] - out, err = ParsePublicKey(key) - if err != nil { - return nil, "", err - } - comment = string(bytes.TrimSpace(in[i:])) - return out, comment, nil -} - -// ParseKnownHosts parses an entry in the format of the known_hosts file. -// -// The known_hosts format is documented in the sshd(8) manual page. 
This -// function will parse a single entry from in. On successful return, marker -// will contain the optional marker value (i.e. "cert-authority" or "revoked") -// or else be empty, hosts will contain the hosts that this entry matches, -// pubKey will contain the public key and comment will contain any trailing -// comment at the end of the line. See the sshd(8) manual page for the various -// forms that a host string can take. -// -// The unparsed remainder of the input will be returned in rest. This function -// can be called repeatedly to parse multiple entries. -// -// If no entries were found in the input then err will be io.EOF. Otherwise a -// non-nil err value indicates a parse error. -func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) { - for len(in) > 0 { - end := bytes.IndexByte(in, '\n') - if end != -1 { - rest = in[end+1:] - in = in[:end] - } else { - rest = nil - } - - end = bytes.IndexByte(in, '\r') - if end != -1 { - in = in[:end] - } - - in = bytes.TrimSpace(in) - if len(in) == 0 || in[0] == '#' { - in = rest - continue - } - - i := bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - // Strip out the beginning of the known_host key. - // This is either an optional marker or a (set of) hostname(s). - keyFields := bytes.Fields(in) - if len(keyFields) < 3 || len(keyFields) > 5 { - return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data") - } - - // keyFields[0] is either "@cert-authority", "@revoked" or a comma separated - // list of hosts - marker := "" - if keyFields[0][0] == '@' { - marker = string(keyFields[0][1:]) - keyFields = keyFields[1:] - } - - hosts := string(keyFields[0]) - // keyFields[1] contains the key type (e.g. “ssh-rsa”). - // However, that information is duplicated inside the - // base64-encoded key and so is ignored here. - - key := bytes.Join(keyFields[2:], []byte(" ")) - if pubKey, comment, err = parseAuthorizedKey(key); err != nil { - return "", nil, nil, "", nil, err - } - - return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil - } - - return "", nil, nil, "", nil, io.EOF -} - -// ParseAuthorizedKeys parses a public key from an authorized_keys -// file used in OpenSSH according to the sshd(8) manual page. -func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { - for len(in) > 0 { - end := bytes.IndexByte(in, '\n') - if end != -1 { - rest = in[end+1:] - in = in[:end] - } else { - rest = nil - } - - end = bytes.IndexByte(in, '\r') - if end != -1 { - in = in[:end] - } - - in = bytes.TrimSpace(in) - if len(in) == 0 || in[0] == '#' { - in = rest - continue - } - - i := bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { - return out, comment, options, rest, nil - } - - // No key type recognised. Maybe there's an options field at - // the beginning. 
- var b byte - inQuote := false - var candidateOptions []string - optionStart := 0 - for i, b = range in { - isEnd := !inQuote && (b == ' ' || b == '\t') - if (b == ',' && !inQuote) || isEnd { - if i-optionStart > 0 { - candidateOptions = append(candidateOptions, string(in[optionStart:i])) - } - optionStart = i + 1 - } - if isEnd { - break - } - if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) { - inQuote = !inQuote - } - } - for i < len(in) && (in[i] == ' ' || in[i] == '\t') { - i++ - } - if i == len(in) { - // Invalid line: unmatched quote - in = rest - continue - } - - in = in[i:] - i = bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { - options = candidateOptions - return out, comment, options, rest, nil - } - - in = rest - continue - } - - return nil, "", nil, nil, errors.New("ssh: no key found") -} - -// ParsePublicKey parses an SSH public key formatted for use in -// the SSH wire protocol according to RFC 4253, section 6.6. -func ParsePublicKey(in []byte) (out PublicKey, err error) { - algo, in, ok := parseString(in) - if !ok { - return nil, errShortRead - } - var rest []byte - out, rest, err = parsePubKey(in, string(algo)) - if len(rest) > 0 { - return nil, errors.New("ssh: trailing junk in public key") - } - - return out, err -} - -// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH -// authorized_keys file. The return value ends with newline. -func MarshalAuthorizedKey(key PublicKey) []byte { - b := &bytes.Buffer{} - b.WriteString(key.Type()) - b.WriteByte(' ') - e := base64.NewEncoder(base64.StdEncoding, b) - e.Write(key.Marshal()) - e.Close() - b.WriteByte('\n') - return b.Bytes() -} - -// PublicKey is an abstraction of different types of public keys. -type PublicKey interface { - // Type returns the key's type, e.g. "ssh-rsa". - Type() string - - // Marshal returns the serialized key data in SSH wire format, - // with the name prefix. To unmarshal the returned data, use - // the ParsePublicKey function. - Marshal() []byte - - // Verify that sig is a signature on the given data using this - // key. This function will hash the data appropriately first. - Verify(data []byte, sig *Signature) error -} - -// CryptoPublicKey, if implemented by a PublicKey, -// returns the underlying crypto.PublicKey form of the key. -type CryptoPublicKey interface { - CryptoPublicKey() crypto.PublicKey -} - -// A Signer can create signatures that verify against a public key. -type Signer interface { - // PublicKey returns an associated PublicKey instance. - PublicKey() PublicKey - - // Sign returns raw signature for the given data. This method - // will apply the hash specified for the keytype to the data. - Sign(rand io.Reader, data []byte) (*Signature, error) -} - -type rsaPublicKey rsa.PublicKey - -func (r *rsaPublicKey) Type() string { - return "ssh-rsa" -} - -// parseRSA parses an RSA key according to RFC 4253, section 6.6. 
-func parseRSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - E *big.Int - N *big.Int - Rest []byte `ssh:"rest"` - } - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - if w.E.BitLen() > 24 { - return nil, nil, errors.New("ssh: exponent too large") - } - e := w.E.Int64() - if e < 3 || e&1 == 0 { - return nil, nil, errors.New("ssh: incorrect exponent") - } - - var key rsa.PublicKey - key.E = int(e) - key.N = w.N - return (*rsaPublicKey)(&key), w.Rest, nil -} - -func (r *rsaPublicKey) Marshal() []byte { - e := new(big.Int).SetInt64(int64(r.E)) - // RSA publickey struct layout should match the struct used by - // parseRSACert in the x/crypto/ssh/agent package. - wirekey := struct { - Name string - E *big.Int - N *big.Int - }{ - KeyAlgoRSA, - e, - r.N, - } - return Marshal(&wirekey) -} - -func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != r.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) - } - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), crypto.SHA1, digest, sig.Blob) -} - -func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*rsa.PublicKey)(r) -} - -type dsaPublicKey dsa.PublicKey - -func (k *dsaPublicKey) Type() string { - return "ssh-dss" -} - -func checkDSAParams(param *dsa.Parameters) error { - // SSH specifies FIPS 186-2, which only provided a single size - // (1024 bits) DSA key. FIPS 186-3 allows for larger key - // sizes, which would confuse SSH. - if l := param.P.BitLen(); l != 1024 { - return fmt.Errorf("ssh: unsupported DSA key size %d", l) - } - - return nil -} - -// parseDSA parses an DSA key according to RFC 4253, section 6.6. -func parseDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - P, Q, G, Y *big.Int - Rest []byte `ssh:"rest"` - } - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - param := dsa.Parameters{ - P: w.P, - Q: w.Q, - G: w.G, - } - if err := checkDSAParams(¶m); err != nil { - return nil, nil, err - } - - key := &dsaPublicKey{ - Parameters: param, - Y: w.Y, - } - return key, w.Rest, nil -} - -func (k *dsaPublicKey) Marshal() []byte { - // DSA publickey struct layout should match the struct used by - // parseDSACert in the x/crypto/ssh/agent package. - w := struct { - Name string - P, Q, G, Y *big.Int - }{ - k.Type(), - k.P, - k.Q, - k.G, - k.Y, - } - - return Marshal(&w) -} - -func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 4253, section 6.6, - // The value for 'dss_signature_blob' is encoded as a string containing - // r, followed by s (which are 160-bit integers, without lengths or - // padding, unsigned, and in network byte order). - // For DSS purposes, sig.Blob should be exactly 40 bytes in length. 
- if len(sig.Blob) != 40 { - return errors.New("ssh: DSA signature parse error") - } - r := new(big.Int).SetBytes(sig.Blob[:20]) - s := new(big.Int).SetBytes(sig.Blob[20:]) - if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*dsa.PublicKey)(k) -} - -type dsaPrivateKey struct { - *dsa.PrivateKey -} - -func (k *dsaPrivateKey) PublicKey() PublicKey { - return (*dsaPublicKey)(&k.PrivateKey.PublicKey) -} - -func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - r, s, err := dsa.Sign(rand, k.PrivateKey, digest) - if err != nil { - return nil, err - } - - sig := make([]byte, 40) - rb := r.Bytes() - sb := s.Bytes() - - copy(sig[20-len(rb):20], rb) - copy(sig[40-len(sb):], sb) - - return &Signature{ - Format: k.PublicKey().Type(), - Blob: sig, - }, nil -} - -type ecdsaPublicKey ecdsa.PublicKey - -func (k *ecdsaPublicKey) Type() string { - return "ecdsa-sha2-" + k.nistID() -} - -func (k *ecdsaPublicKey) nistID() string { - switch k.Params().BitSize { - case 256: - return "nistp256" - case 384: - return "nistp384" - case 521: - return "nistp521" - } - panic("ssh: unsupported ecdsa key size") -} - -type ed25519PublicKey ed25519.PublicKey - -func (k ed25519PublicKey) Type() string { - return KeyAlgoED25519 -} - -func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := ed25519.PublicKey(w.KeyBytes) - - return (ed25519PublicKey)(key), w.Rest, nil -} - -func (k ed25519PublicKey) Marshal() []byte { - w := struct { - Name string - KeyBytes []byte - }{ - KeyAlgoED25519, - []byte(k), - } - return Marshal(&w) -} - -func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - - edKey := (ed25519.PublicKey)(k) - if ok := ed25519.Verify(edKey, b, sig.Blob); !ok { - return errors.New("ssh: signature did not verify") - } - - return nil -} - -func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey { - return ed25519.PublicKey(k) -} - -func supportedEllipticCurve(curve elliptic.Curve) bool { - return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() -} - -// ecHash returns the hash to match the given elliptic curve, see RFC -// 5656, section 6.2.1 -func ecHash(curve elliptic.Curve) crypto.Hash { - bitSize := curve.Params().BitSize - switch { - case bitSize <= 256: - return crypto.SHA256 - case bitSize <= 384: - return crypto.SHA384 - } - return crypto.SHA512 -} - -// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. 
-func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - Curve string - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := new(ecdsa.PublicKey) - - switch w.Curve { - case "nistp256": - key.Curve = elliptic.P256() - case "nistp384": - key.Curve = elliptic.P384() - case "nistp521": - key.Curve = elliptic.P521() - default: - return nil, nil, errors.New("ssh: unsupported curve") - } - - key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) - if key.X == nil || key.Y == nil { - return nil, nil, errors.New("ssh: invalid curve point") - } - return (*ecdsaPublicKey)(key), w.Rest, nil -} - -func (k *ecdsaPublicKey) Marshal() []byte { - // See RFC 5656, section 3.1. - keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) - // ECDSA publickey struct layout should match the struct used by - // parseECDSACert in the x/crypto/ssh/agent package. - w := struct { - Name string - ID string - Key []byte - }{ - k.Type(), - k.nistID(), - keyBytes, - } - - return Marshal(&w) -} - -func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - - h := ecHash(k.Curve).New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 5656, section 3.1.2, - // The ecdsa_signature_blob value has the following specific encoding: - // mpint r - // mpint s - var ecSig struct { - R *big.Int - S *big.Int - } - - if err := Unmarshal(sig.Blob, &ecSig); err != nil { - return err - } - - if ecdsa.Verify((*ecdsa.PublicKey)(k), digest, ecSig.R, ecSig.S) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*ecdsa.PublicKey)(k) -} - -// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, -// *ecdsa.PrivateKey or any other crypto.Signer and returns a -// corresponding Signer instance. ECDSA keys must use P-256, P-384 or -// P-521. DSA keys must use parameter size L1024N160. -func NewSignerFromKey(key interface{}) (Signer, error) { - switch key := key.(type) { - case crypto.Signer: - return NewSignerFromSigner(key) - case *dsa.PrivateKey: - return newDSAPrivateKey(key) - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -func newDSAPrivateKey(key *dsa.PrivateKey) (Signer, error) { - if err := checkDSAParams(&key.PublicKey.Parameters); err != nil { - return nil, err - } - - return &dsaPrivateKey{key}, nil -} - -type wrappedSigner struct { - signer crypto.Signer - pubKey PublicKey -} - -// NewSignerFromSigner takes any crypto.Signer implementation and -// returns a corresponding Signer interface. This can be used, for -// example, with keys kept in hardware modules. 
-func NewSignerFromSigner(signer crypto.Signer) (Signer, error) { - pubKey, err := NewPublicKey(signer.Public()) - if err != nil { - return nil, err - } - - return &wrappedSigner{signer, pubKey}, nil -} - -func (s *wrappedSigner) PublicKey() PublicKey { - return s.pubKey -} - -func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - var hashFunc crypto.Hash - - switch key := s.pubKey.(type) { - case *rsaPublicKey, *dsaPublicKey: - hashFunc = crypto.SHA1 - case *ecdsaPublicKey: - hashFunc = ecHash(key.Curve) - case ed25519PublicKey: - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } - - var digest []byte - if hashFunc != 0 { - h := hashFunc.New() - h.Write(data) - digest = h.Sum(nil) - } else { - digest = data - } - - signature, err := s.signer.Sign(rand, digest, hashFunc) - if err != nil { - return nil, err - } - - // crypto.Signer.Sign is expected to return an ASN.1-encoded signature - // for ECDSA and DSA, but that's not the encoding expected by SSH, so - // re-encode. - switch s.pubKey.(type) { - case *ecdsaPublicKey, *dsaPublicKey: - type asn1Signature struct { - R, S *big.Int - } - asn1Sig := new(asn1Signature) - _, err := asn1.Unmarshal(signature, asn1Sig) - if err != nil { - return nil, err - } - - switch s.pubKey.(type) { - case *ecdsaPublicKey: - signature = Marshal(asn1Sig) - - case *dsaPublicKey: - signature = make([]byte, 40) - r := asn1Sig.R.Bytes() - s := asn1Sig.S.Bytes() - copy(signature[20-len(r):20], r) - copy(signature[40-len(s):40], s) - } - } - - return &Signature{ - Format: s.pubKey.Type(), - Blob: signature, - }, nil -} - -// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, -// or ed25519.PublicKey returns a corresponding PublicKey instance. -// ECDSA keys must use P-256, P-384 or P-521. -func NewPublicKey(key interface{}) (PublicKey, error) { - switch key := key.(type) { - case *rsa.PublicKey: - return (*rsaPublicKey)(key), nil - case *ecdsa.PublicKey: - if !supportedEllipticCurve(key.Curve) { - return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported") - } - return (*ecdsaPublicKey)(key), nil - case *dsa.PublicKey: - return (*dsaPublicKey)(key), nil - case ed25519.PublicKey: - return (ed25519PublicKey)(key), nil - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports -// the same keys as ParseRawPrivateKey. -func ParsePrivateKey(pemBytes []byte) (Signer, error) { - key, err := ParseRawPrivateKey(pemBytes) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private -// key and passphrase. It supports the same keys as -// ParseRawPrivateKeyWithPassphrase. -func ParsePrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (Signer, error) { - key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// encryptedBlock tells whether a private key is -// encrypted by examining its Proc-Type header -// for a mention of ENCRYPTED -// according to RFC 1421 Section 4.6.1.1. -func encryptedBlock(block *pem.Block) bool { - return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") -} - -// ParseRawPrivateKey returns a private key from a PEM encoded private key. It -// supports RSA (PKCS#1), DSA (OpenSSL), and ECDSA private keys. 
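Likewise for the Signer plumbing above (ParsePrivateKey, NewSignerFromSigner, the Signer interface): a typical round trip loads an unencrypted PEM key, signs some bytes, and verifies with the matching public key. Sketch only; the key file name is hypothetical.

    // Sketch only: PEM private key -> ssh.Signer -> sign and verify.
    package main

    import (
        "crypto/rand"
        "io/ioutil"
        "log"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        pemBytes, err := ioutil.ReadFile("id_ed25519") // hypothetical unencrypted key
        if err != nil {
            log.Fatal(err)
        }
        signer, err := ssh.ParsePrivateKey(pemBytes)
        if err != nil {
            log.Fatal(err)
        }
        data := []byte("payload to sign")
        sig, err := signer.Sign(rand.Reader, data)
        if err != nil {
            log.Fatal(err)
        }
        if err := signer.PublicKey().Verify(data, sig); err != nil {
            log.Fatal(err)
        }
        log.Printf("verified; fingerprint %s", ssh.FingerprintSHA256(signer.PublicKey()))
    }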
-func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - - if encryptedBlock(block) { - return nil, errors.New("ssh: cannot decode encrypted private keys") - } - - switch block.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(block.Bytes) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(block.Bytes) - case "DSA PRIVATE KEY": - return ParseDSAPrivateKey(block.Bytes) - case "OPENSSH PRIVATE KEY": - return parseOpenSSHPrivateKey(block.Bytes) - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } -} - -// ParseRawPrivateKeyWithPassphrase returns a private key decrypted with -// passphrase from a PEM encoded private key. If wrong passphrase, return -// x509.IncorrectPasswordError. -func ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (interface{}, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - buf := block.Bytes - - if encryptedBlock(block) { - if x509.IsEncryptedPEMBlock(block) { - var err error - buf, err = x509.DecryptPEMBlock(block, passPhrase) - if err != nil { - if err == x509.IncorrectPasswordError { - return nil, err - } - return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err) - } - } - } - - switch block.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(buf) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(buf) - case "DSA PRIVATE KEY": - return ParseDSAPrivateKey(buf) - case "OPENSSH PRIVATE KEY": - return parseOpenSSHPrivateKey(buf) - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } -} - -// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as -// specified by the OpenSSL DSA man page. 
-func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { - var k struct { - Version int - P *big.Int - Q *big.Int - G *big.Int - Pub *big.Int - Priv *big.Int - } - rest, err := asn1.Unmarshal(der, &k) - if err != nil { - return nil, errors.New("ssh: failed to parse DSA key: " + err.Error()) - } - if len(rest) > 0 { - return nil, errors.New("ssh: garbage after DSA key") - } - - return &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: k.P, - Q: k.Q, - G: k.G, - }, - Y: k.Pub, - }, - X: k.Priv, - }, nil -} - -// Implemented based on the documentation at -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key -func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) { - magic := append([]byte("openssh-key-v1"), 0) - if !bytes.Equal(magic, key[0:len(magic)]) { - return nil, errors.New("ssh: invalid openssh private key format") - } - remaining := key[len(magic):] - - var w struct { - CipherName string - KdfName string - KdfOpts string - NumKeys uint32 - PubKey []byte - PrivKeyBlock []byte - } - - if err := Unmarshal(remaining, &w); err != nil { - return nil, err - } - - if w.KdfName != "none" || w.CipherName != "none" { - return nil, errors.New("ssh: cannot decode encrypted private keys") - } - - pk1 := struct { - Check1 uint32 - Check2 uint32 - Keytype string - Rest []byte `ssh:"rest"` - }{} - - if err := Unmarshal(w.PrivKeyBlock, &pk1); err != nil { - return nil, err - } - - if pk1.Check1 != pk1.Check2 { - return nil, errors.New("ssh: checkint mismatch") - } - - // we only handle ed25519 and rsa keys currently - switch pk1.Keytype { - case KeyAlgoRSA: - // https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773 - key := struct { - N *big.Int - E *big.Int - D *big.Int - Iqmp *big.Int - P *big.Int - Q *big.Int - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - for i, b := range key.Pad { - if int(b) != i+1 { - return nil, errors.New("ssh: padding not as expected") - } - } - - pk := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: key.N, - E: int(key.E.Int64()), - }, - D: key.D, - Primes: []*big.Int{key.P, key.Q}, - } - - if err := pk.Validate(); err != nil { - return nil, err - } - - pk.Precompute() - - return pk, nil - case KeyAlgoED25519: - key := struct { - Pub []byte - Priv []byte - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if len(key.Priv) != ed25519.PrivateKeySize { - return nil, errors.New("ssh: private key unexpected length") - } - - for i, b := range key.Pad { - if int(b) != i+1 { - return nil, errors.New("ssh: padding not as expected") - } - } - - pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) - copy(pk, key.Priv) - return &pk, nil - default: - return nil, errors.New("ssh: unhandled key type") - } -} - -// FingerprintLegacyMD5 returns the user presentation of the key's -// fingerprint as described by RFC 4716 section 4. -func FingerprintLegacyMD5(pubKey PublicKey) string { - md5sum := md5.Sum(pubKey.Marshal()) - hexarray := make([]string, len(md5sum)) - for i, c := range md5sum { - hexarray[i] = hex.EncodeToString([]byte{c}) - } - return strings.Join(hexarray, ":") -} - -// FingerprintSHA256 returns the user presentation of the key's -// fingerprint as unpadded base64 encoded sha256 hash. -// This format was introduced from OpenSSH 6.8. 
-// https://www.openssh.com/txt/release-6.8 -// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding) -func FingerprintSHA256(pubKey PublicKey) string { - sha256sum := sha256.Sum256(pubKey.Marshal()) - hash := base64.RawStdEncoding.EncodeToString(sha256sum[:]) - return "SHA256:" + hash -} diff --git a/vendor/golang.org/x/crypto/ssh/keys_test.go b/vendor/golang.org/x/crypto/ssh/keys_test.go deleted file mode 100644 index 9a90abc0..00000000 --- a/vendor/golang.org/x/crypto/ssh/keys_test.go +++ /dev/null @@ -1,500 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/base64" - "fmt" - "reflect" - "strings" - "testing" - - "golang.org/x/crypto/ed25519" - "golang.org/x/crypto/ssh/testdata" -) - -func rawKey(pub PublicKey) interface{} { - switch k := pub.(type) { - case *rsaPublicKey: - return (*rsa.PublicKey)(k) - case *dsaPublicKey: - return (*dsa.PublicKey)(k) - case *ecdsaPublicKey: - return (*ecdsa.PublicKey)(k) - case ed25519PublicKey: - return (ed25519.PublicKey)(k) - case *Certificate: - return k - } - panic("unknown key type") -} - -func TestKeyMarshalParse(t *testing.T) { - for _, priv := range testSigners { - pub := priv.PublicKey() - roundtrip, err := ParsePublicKey(pub.Marshal()) - if err != nil { - t.Errorf("ParsePublicKey(%T): %v", pub, err) - } - - k1 := rawKey(pub) - k2 := rawKey(roundtrip) - - if !reflect.DeepEqual(k1, k2) { - t.Errorf("got %#v in roundtrip, want %#v", k2, k1) - } - } -} - -func TestUnsupportedCurves(t *testing.T) { - raw, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader) - if err != nil { - t.Fatalf("GenerateKey: %v", err) - } - - if _, err = NewSignerFromKey(raw); err == nil || !strings.Contains(err.Error(), "only P-256") { - t.Fatalf("NewPrivateKey should not succeed with P-224, got: %v", err) - } - - if _, err = NewPublicKey(&raw.PublicKey); err == nil || !strings.Contains(err.Error(), "only P-256") { - t.Fatalf("NewPublicKey should not succeed with P-224, got: %v", err) - } -} - -func TestNewPublicKey(t *testing.T) { - for _, k := range testSigners { - raw := rawKey(k.PublicKey()) - // Skip certificates, as NewPublicKey does not support them. 
- if _, ok := raw.(*Certificate); ok { - continue - } - pub, err := NewPublicKey(raw) - if err != nil { - t.Errorf("NewPublicKey(%#v): %v", raw, err) - } - if !reflect.DeepEqual(k.PublicKey(), pub) { - t.Errorf("NewPublicKey(%#v) = %#v, want %#v", raw, pub, k.PublicKey()) - } - } -} - -func TestKeySignVerify(t *testing.T) { - for _, priv := range testSigners { - pub := priv.PublicKey() - - data := []byte("sign me") - sig, err := priv.Sign(rand.Reader, data) - if err != nil { - t.Fatalf("Sign(%T): %v", priv, err) - } - - if err := pub.Verify(data, sig); err != nil { - t.Errorf("publicKey.Verify(%T): %v", priv, err) - } - sig.Blob[5]++ - if err := pub.Verify(data, sig); err == nil { - t.Errorf("publicKey.Verify on broken sig did not fail") - } - } -} - -func TestParseRSAPrivateKey(t *testing.T) { - key := testPrivateKeys["rsa"] - - rsa, ok := key.(*rsa.PrivateKey) - if !ok { - t.Fatalf("got %T, want *rsa.PrivateKey", rsa) - } - - if err := rsa.Validate(); err != nil { - t.Errorf("Validate: %v", err) - } -} - -func TestParseECPrivateKey(t *testing.T) { - key := testPrivateKeys["ecdsa"] - - ecKey, ok := key.(*ecdsa.PrivateKey) - if !ok { - t.Fatalf("got %T, want *ecdsa.PrivateKey", ecKey) - } - - if !validateECPublicKey(ecKey.Curve, ecKey.X, ecKey.Y) { - t.Fatalf("public key does not validate.") - } -} - -// See Issue https://github.com/golang/go/issues/6650. -func TestParseEncryptedPrivateKeysFails(t *testing.T) { - const wantSubstring = "encrypted" - for i, tt := range testdata.PEMEncryptedKeys { - _, err := ParsePrivateKey(tt.PEMBytes) - if err == nil { - t.Errorf("#%d key %s: ParsePrivateKey successfully parsed, expected an error", i, tt.Name) - continue - } - - if !strings.Contains(err.Error(), wantSubstring) { - t.Errorf("#%d key %s: got error %q, want substring %q", i, tt.Name, err, wantSubstring) - } - } -} - -// Parse encrypted private keys with passphrase -func TestParseEncryptedPrivateKeysWithPassphrase(t *testing.T) { - data := []byte("sign me") - for _, tt := range testdata.PEMEncryptedKeys { - s, err := ParsePrivateKeyWithPassphrase(tt.PEMBytes, []byte(tt.EncryptionKey)) - if err != nil { - t.Fatalf("ParsePrivateKeyWithPassphrase returned error: %s", err) - continue - } - sig, err := s.Sign(rand.Reader, data) - if err != nil { - t.Fatalf("dsa.Sign: %v", err) - } - if err := s.PublicKey().Verify(data, sig); err != nil { - t.Errorf("Verify failed: %v", err) - } - } - - tt := testdata.PEMEncryptedKeys[0] - _, err := ParsePrivateKeyWithPassphrase(tt.PEMBytes, []byte("incorrect")) - if err != x509.IncorrectPasswordError { - t.Fatalf("got %v want IncorrectPasswordError", err) - } -} - -func TestParseDSA(t *testing.T) { - // We actually exercise the ParsePrivateKey codepath here, as opposed to - // using the ParseRawPrivateKey+NewSignerFromKey path that testdata_test.go - // uses. - s, err := ParsePrivateKey(testdata.PEMBytes["dsa"]) - if err != nil { - t.Fatalf("ParsePrivateKey returned error: %s", err) - } - - data := []byte("sign me") - sig, err := s.Sign(rand.Reader, data) - if err != nil { - t.Fatalf("dsa.Sign: %v", err) - } - - if err := s.PublicKey().Verify(data, sig); err != nil { - t.Errorf("Verify failed: %v", err) - } -} - -// Tests for authorized_keys parsing. - -// getTestKey returns a public key, and its base64 encoding. 
-func getTestKey() (PublicKey, string) { - k := testPublicKeys["rsa"] - - b := &bytes.Buffer{} - e := base64.NewEncoder(base64.StdEncoding, b) - e.Write(k.Marshal()) - e.Close() - - return k, b.String() -} - -func TestMarshalParsePublicKey(t *testing.T) { - pub, pubSerialized := getTestKey() - line := fmt.Sprintf("%s %s user@host", pub.Type(), pubSerialized) - - authKeys := MarshalAuthorizedKey(pub) - actualFields := strings.Fields(string(authKeys)) - if len(actualFields) == 0 { - t.Fatalf("failed authKeys: %v", authKeys) - } - - // drop the comment - expectedFields := strings.Fields(line)[0:2] - - if !reflect.DeepEqual(actualFields, expectedFields) { - t.Errorf("got %v, expected %v", actualFields, expectedFields) - } - - actPub, _, _, _, err := ParseAuthorizedKey([]byte(line)) - if err != nil { - t.Fatalf("cannot parse %v: %v", line, err) - } - if !reflect.DeepEqual(actPub, pub) { - t.Errorf("got %v, expected %v", actPub, pub) - } -} - -type testAuthResult struct { - pubKey PublicKey - options []string - comments string - rest string - ok bool -} - -func testAuthorizedKeys(t *testing.T, authKeys []byte, expected []testAuthResult) { - rest := authKeys - var values []testAuthResult - for len(rest) > 0 { - var r testAuthResult - var err error - r.pubKey, r.comments, r.options, rest, err = ParseAuthorizedKey(rest) - r.ok = (err == nil) - t.Log(err) - r.rest = string(rest) - values = append(values, r) - } - - if !reflect.DeepEqual(values, expected) { - t.Errorf("got %#v, expected %#v", values, expected) - } -} - -func TestAuthorizedKeyBasic(t *testing.T) { - pub, pubSerialized := getTestKey() - line := "ssh-rsa " + pubSerialized + " user@host" - testAuthorizedKeys(t, []byte(line), - []testAuthResult{ - {pub, nil, "user@host", "", true}, - }) -} - -func TestAuth(t *testing.T) { - pub, pubSerialized := getTestKey() - authWithOptions := []string{ - `# comments to ignore before any keys...`, - ``, - `env="HOME=/home/root",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`, - `# comments to ignore, along with a blank line`, - ``, - `env="HOME=/home/root2" ssh-rsa ` + pubSerialized + ` user2@host2`, - ``, - `# more comments, plus a invalid entry`, - `ssh-rsa data-that-will-not-parse user@host3`, - } - for _, eol := range []string{"\n", "\r\n"} { - authOptions := strings.Join(authWithOptions, eol) - rest2 := strings.Join(authWithOptions[3:], eol) - rest3 := strings.Join(authWithOptions[6:], eol) - testAuthorizedKeys(t, []byte(authOptions), []testAuthResult{ - {pub, []string{`env="HOME=/home/root"`, "no-port-forwarding"}, "user@host", rest2, true}, - {pub, []string{`env="HOME=/home/root2"`}, "user2@host2", rest3, true}, - {nil, nil, "", "", false}, - }) - } -} - -func TestAuthWithQuotedSpaceInEnv(t *testing.T) { - pub, pubSerialized := getTestKey() - authWithQuotedSpaceInEnv := []byte(`env="HOME=/home/root dir",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`) - testAuthorizedKeys(t, []byte(authWithQuotedSpaceInEnv), []testAuthResult{ - {pub, []string{`env="HOME=/home/root dir"`, "no-port-forwarding"}, "user@host", "", true}, - }) -} - -func TestAuthWithQuotedCommaInEnv(t *testing.T) { - pub, pubSerialized := getTestKey() - authWithQuotedCommaInEnv := []byte(`env="HOME=/home/root,dir",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`) - testAuthorizedKeys(t, []byte(authWithQuotedCommaInEnv), []testAuthResult{ - {pub, []string{`env="HOME=/home/root,dir"`, "no-port-forwarding"}, "user@host", "", true}, - }) -} - -func TestAuthWithQuotedQuoteInEnv(t *testing.T) { - pub, 
pubSerialized := getTestKey() - authWithQuotedQuoteInEnv := []byte(`env="HOME=/home/\"root dir",no-port-forwarding` + "\t" + `ssh-rsa` + "\t" + pubSerialized + ` user@host`) - authWithDoubleQuotedQuote := []byte(`no-port-forwarding,env="HOME=/home/ \"root dir\"" ssh-rsa ` + pubSerialized + "\t" + `user@host`) - testAuthorizedKeys(t, []byte(authWithQuotedQuoteInEnv), []testAuthResult{ - {pub, []string{`env="HOME=/home/\"root dir"`, "no-port-forwarding"}, "user@host", "", true}, - }) - - testAuthorizedKeys(t, []byte(authWithDoubleQuotedQuote), []testAuthResult{ - {pub, []string{"no-port-forwarding", `env="HOME=/home/ \"root dir\""`}, "user@host", "", true}, - }) -} - -func TestAuthWithInvalidSpace(t *testing.T) { - _, pubSerialized := getTestKey() - authWithInvalidSpace := []byte(`env="HOME=/home/root dir", no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host -#more to follow but still no valid keys`) - testAuthorizedKeys(t, []byte(authWithInvalidSpace), []testAuthResult{ - {nil, nil, "", "", false}, - }) -} - -func TestAuthWithMissingQuote(t *testing.T) { - pub, pubSerialized := getTestKey() - authWithMissingQuote := []byte(`env="HOME=/home/root,no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host -env="HOME=/home/root",shared-control ssh-rsa ` + pubSerialized + ` user@host`) - - testAuthorizedKeys(t, []byte(authWithMissingQuote), []testAuthResult{ - {pub, []string{`env="HOME=/home/root"`, `shared-control`}, "user@host", "", true}, - }) -} - -func TestInvalidEntry(t *testing.T) { - authInvalid := []byte(`ssh-rsa`) - _, _, _, _, err := ParseAuthorizedKey(authInvalid) - if err == nil { - t.Errorf("got valid entry for %q", authInvalid) - } -} - -var knownHostsParseTests = []struct { - input string - err string - - marker string - comment string - hosts []string - rest string -}{ - { - "", - "EOF", - - "", "", nil, "", - }, - { - "# Just a comment", - "EOF", - - "", "", nil, "", - }, - { - " \t ", - "EOF", - - "", "", nil, "", - }, - { - "localhost ssh-rsa {RSAPUB}", - "", - - "", "", []string{"localhost"}, "", - }, - { - "localhost\tssh-rsa {RSAPUB}", - "", - - "", "", []string{"localhost"}, "", - }, - { - "localhost\tssh-rsa {RSAPUB}\tcomment comment", - "", - - "", "comment comment", []string{"localhost"}, "", - }, - { - "localhost\tssh-rsa {RSAPUB}\tcomment comment\n", - "", - - "", "comment comment", []string{"localhost"}, "", - }, - { - "localhost\tssh-rsa {RSAPUB}\tcomment comment\r\n", - "", - - "", "comment comment", []string{"localhost"}, "", - }, - { - "localhost\tssh-rsa {RSAPUB}\tcomment comment\r\nnext line", - "", - - "", "comment comment", []string{"localhost"}, "next line", - }, - { - "localhost,[host2:123]\tssh-rsa {RSAPUB}\tcomment comment", - "", - - "", "comment comment", []string{"localhost", "[host2:123]"}, "", - }, - { - "@marker \tlocalhost,[host2:123]\tssh-rsa {RSAPUB}", - "", - - "marker", "", []string{"localhost", "[host2:123]"}, "", - }, - { - "@marker \tlocalhost,[host2:123]\tssh-rsa aabbccdd", - "short read", - - "", "", nil, "", - }, -} - -func TestKnownHostsParsing(t *testing.T) { - rsaPub, rsaPubSerialized := getTestKey() - - for i, test := range knownHostsParseTests { - var expectedKey PublicKey - const rsaKeyToken = "{RSAPUB}" - - input := test.input - if strings.Contains(input, rsaKeyToken) { - expectedKey = rsaPub - input = strings.Replace(test.input, rsaKeyToken, rsaPubSerialized, -1) - } - - marker, hosts, pubKey, comment, rest, err := ParseKnownHosts([]byte(input)) - if err != nil { - if len(test.err) == 0 { - t.Errorf("#%d: unexpectedly 
failed with %q", i, err) - } else if !strings.Contains(err.Error(), test.err) { - t.Errorf("#%d: expected error containing %q, but got %q", i, test.err, err) - } - continue - } else if len(test.err) != 0 { - t.Errorf("#%d: succeeded but expected error including %q", i, test.err) - continue - } - - if !reflect.DeepEqual(expectedKey, pubKey) { - t.Errorf("#%d: expected key %#v, but got %#v", i, expectedKey, pubKey) - } - - if marker != test.marker { - t.Errorf("#%d: expected marker %q, but got %q", i, test.marker, marker) - } - - if comment != test.comment { - t.Errorf("#%d: expected comment %q, but got %q", i, test.comment, comment) - } - - if !reflect.DeepEqual(test.hosts, hosts) { - t.Errorf("#%d: expected hosts %#v, but got %#v", i, test.hosts, hosts) - } - - if rest := string(rest); rest != test.rest { - t.Errorf("#%d: expected remaining input to be %q, but got %q", i, test.rest, rest) - } - } -} - -func TestFingerprintLegacyMD5(t *testing.T) { - pub, _ := getTestKey() - fingerprint := FingerprintLegacyMD5(pub) - want := "fb:61:6d:1a:e3:f0:95:45:3c:a0:79:be:4a:93:63:66" // ssh-keygen -lf -E md5 rsa - if fingerprint != want { - t.Errorf("got fingerprint %q want %q", fingerprint, want) - } -} - -func TestFingerprintSHA256(t *testing.T) { - pub, _ := getTestKey() - fingerprint := FingerprintSHA256(pub) - want := "SHA256:Anr3LjZK8YVpjrxu79myrW9Hrb/wpcMNpVvTq/RcBm8" // ssh-keygen -lf rsa - if fingerprint != want { - t.Errorf("got fingerprint %q want %q", fingerprint, want) - } -} diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go deleted file mode 100644 index c07a0628..00000000 --- a/vendor/golang.org/x/crypto/ssh/mac.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -// Message authentication support - -import ( - "crypto/hmac" - "crypto/sha1" - "crypto/sha256" - "hash" -) - -type macMode struct { - keySize int - etm bool - new func(key []byte) hash.Hash -} - -// truncatingMAC wraps around a hash.Hash and truncates the output digest to -// a given size. -type truncatingMAC struct { - length int - hmac hash.Hash -} - -func (t truncatingMAC) Write(data []byte) (int, error) { - return t.hmac.Write(data) -} - -func (t truncatingMAC) Sum(in []byte) []byte { - out := t.hmac.Sum(in) - return out[:len(in)+t.length] -} - -func (t truncatingMAC) Reset() { - t.hmac.Reset() -} - -func (t truncatingMAC) Size() int { - return t.length -} - -func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } - -var macModes = map[string]*macMode{ - "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha2-256": {32, false, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha1": {20, false, func(key []byte) hash.Hash { - return hmac.New(sha1.New, key) - }}, - "hmac-sha1-96": {20, false, func(key []byte) hash.Hash { - return truncatingMAC{12, hmac.New(sha1.New, key)} - }}, -} diff --git a/vendor/golang.org/x/crypto/ssh/mempipe_test.go b/vendor/golang.org/x/crypto/ssh/mempipe_test.go deleted file mode 100644 index 8697cd61..00000000 --- a/vendor/golang.org/x/crypto/ssh/mempipe_test.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ssh - -import ( - "io" - "sync" - "testing" -) - -// An in-memory packetConn. It is safe to call Close and writePacket -// from different goroutines. -type memTransport struct { - eof bool - pending [][]byte - write *memTransport - sync.Mutex - *sync.Cond -} - -func (t *memTransport) readPacket() ([]byte, error) { - t.Lock() - defer t.Unlock() - for { - if len(t.pending) > 0 { - r := t.pending[0] - t.pending = t.pending[1:] - return r, nil - } - if t.eof { - return nil, io.EOF - } - t.Cond.Wait() - } -} - -func (t *memTransport) closeSelf() error { - t.Lock() - defer t.Unlock() - if t.eof { - return io.EOF - } - t.eof = true - t.Cond.Broadcast() - return nil -} - -func (t *memTransport) Close() error { - err := t.write.closeSelf() - t.closeSelf() - return err -} - -func (t *memTransport) writePacket(p []byte) error { - t.write.Lock() - defer t.write.Unlock() - if t.write.eof { - return io.EOF - } - c := make([]byte, len(p)) - copy(c, p) - t.write.pending = append(t.write.pending, c) - t.write.Cond.Signal() - return nil -} - -func memPipe() (a, b packetConn) { - t1 := memTransport{} - t2 := memTransport{} - t1.write = &t2 - t2.write = &t1 - t1.Cond = sync.NewCond(&t1.Mutex) - t2.Cond = sync.NewCond(&t2.Mutex) - return &t1, &t2 -} - -func TestMemPipe(t *testing.T) { - a, b := memPipe() - if err := a.writePacket([]byte{42}); err != nil { - t.Fatalf("writePacket: %v", err) - } - if err := a.Close(); err != nil { - t.Fatal("Close: ", err) - } - p, err := b.readPacket() - if err != nil { - t.Fatal("readPacket: ", err) - } - if len(p) != 1 || p[0] != 42 { - t.Fatalf("got %v, want {42}", p) - } - p, err = b.readPacket() - if err != io.EOF { - t.Fatalf("got %v, %v, want EOF", p, err) - } -} - -func TestDoubleClose(t *testing.T) { - a, _ := memPipe() - err := a.Close() - if err != nil { - t.Errorf("Close: %v", err) - } - err = a.Close() - if err != io.EOF { - t.Errorf("expect EOF on double close.") - } -} diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go deleted file mode 100644 index 08d28117..00000000 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ /dev/null @@ -1,766 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - "reflect" - "strconv" - "strings" -) - -// These are SSH message type numbers. They are scattered around several -// documents but many were taken from [SSH-PARAMETERS]. -const ( - msgIgnore = 2 - msgUnimplemented = 3 - msgDebug = 4 - msgNewKeys = 21 -) - -// SSH messages: -// -// These structures mirror the wire format of the corresponding SSH messages. -// They are marshaled using reflection with the marshal and unmarshal functions -// in this file. The only wrinkle is that a final member of type []byte with a -// ssh tag of "rest" receives the remainder of a packet when unmarshaling. - -// See RFC 4253, section 11.1. -const msgDisconnect = 1 - -// disconnectMsg is the message that signals a disconnect. It is also -// the error type returned from mux.Wait() -type disconnectMsg struct { - Reason uint32 `sshtype:"1"` - Message string - Language string -} - -func (d *disconnectMsg) Error() string { - return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message) -} - -// See RFC 4253, section 7.1. 
-const msgKexInit = 20 - -type kexInitMsg struct { - Cookie [16]byte `sshtype:"20"` - KexAlgos []string - ServerHostKeyAlgos []string - CiphersClientServer []string - CiphersServerClient []string - MACsClientServer []string - MACsServerClient []string - CompressionClientServer []string - CompressionServerClient []string - LanguagesClientServer []string - LanguagesServerClient []string - FirstKexFollows bool - Reserved uint32 -} - -// See RFC 4253, section 8. - -// Diffie-Helman -const msgKexDHInit = 30 - -type kexDHInitMsg struct { - X *big.Int `sshtype:"30"` -} - -const msgKexECDHInit = 30 - -type kexECDHInitMsg struct { - ClientPubKey []byte `sshtype:"30"` -} - -const msgKexECDHReply = 31 - -type kexECDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - EphemeralPubKey []byte - Signature []byte -} - -const msgKexDHReply = 31 - -type kexDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - Y *big.Int - Signature []byte -} - -// See RFC 4253, section 10. -const msgServiceRequest = 5 - -type serviceRequestMsg struct { - Service string `sshtype:"5"` -} - -// See RFC 4253, section 10. -const msgServiceAccept = 6 - -type serviceAcceptMsg struct { - Service string `sshtype:"6"` -} - -// See RFC 4252, section 5. -const msgUserAuthRequest = 50 - -type userAuthRequestMsg struct { - User string `sshtype:"50"` - Service string - Method string - Payload []byte `ssh:"rest"` -} - -// Used for debug printouts of packets. -type userAuthSuccessMsg struct { -} - -// See RFC 4252, section 5.1 -const msgUserAuthFailure = 51 - -type userAuthFailureMsg struct { - Methods []string `sshtype:"51"` - PartialSuccess bool -} - -// See RFC 4252, section 5.1 -const msgUserAuthSuccess = 52 - -// See RFC 4252, section 5.4 -const msgUserAuthBanner = 53 - -type userAuthBannerMsg struct { - Message string `sshtype:"53"` - // unused, but required to allow message parsing - Language string -} - -// See RFC 4256, section 3.2 -const msgUserAuthInfoRequest = 60 -const msgUserAuthInfoResponse = 61 - -type userAuthInfoRequestMsg struct { - User string `sshtype:"60"` - Instruction string - DeprecatedLanguage string - NumPrompts uint32 - Prompts []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpen = 90 - -type channelOpenMsg struct { - ChanType string `sshtype:"90"` - PeersID uint32 - PeersWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -const msgChannelExtendedData = 95 -const msgChannelData = 94 - -// Used for debug print outs of packets. -type channelDataMsg struct { - PeersID uint32 `sshtype:"94"` - Length uint32 - Rest []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenConfirm = 91 - -type channelOpenConfirmMsg struct { - PeersID uint32 `sshtype:"91"` - MyID uint32 - MyWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenFailure = 92 - -type channelOpenFailureMsg struct { - PeersID uint32 `sshtype:"92"` - Reason RejectionReason - Message string - Language string -} - -const msgChannelRequest = 98 - -type channelRequestMsg struct { - PeersID uint32 `sshtype:"98"` - Request string - WantReply bool - RequestSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.4. -const msgChannelSuccess = 99 - -type channelRequestSuccessMsg struct { - PeersID uint32 `sshtype:"99"` -} - -// See RFC 4254, section 5.4. 
-const msgChannelFailure = 100 - -type channelRequestFailureMsg struct { - PeersID uint32 `sshtype:"100"` -} - -// See RFC 4254, section 5.3 -const msgChannelClose = 97 - -type channelCloseMsg struct { - PeersID uint32 `sshtype:"97"` -} - -// See RFC 4254, section 5.3 -const msgChannelEOF = 96 - -type channelEOFMsg struct { - PeersID uint32 `sshtype:"96"` -} - -// See RFC 4254, section 4 -const msgGlobalRequest = 80 - -type globalRequestMsg struct { - Type string `sshtype:"80"` - WantReply bool - Data []byte `ssh:"rest"` -} - -// See RFC 4254, section 4 -const msgRequestSuccess = 81 - -type globalRequestSuccessMsg struct { - Data []byte `ssh:"rest" sshtype:"81"` -} - -// See RFC 4254, section 4 -const msgRequestFailure = 82 - -type globalRequestFailureMsg struct { - Data []byte `ssh:"rest" sshtype:"82"` -} - -// See RFC 4254, section 5.2 -const msgChannelWindowAdjust = 93 - -type windowAdjustMsg struct { - PeersID uint32 `sshtype:"93"` - AdditionalBytes uint32 -} - -// See RFC 4252, section 7 -const msgUserAuthPubKeyOk = 60 - -type userAuthPubKeyOkMsg struct { - Algo string `sshtype:"60"` - PubKey []byte -} - -// typeTags returns the possible type bytes for the given reflect.Type, which -// should be a struct. The possible values are separated by a '|' character. -func typeTags(structType reflect.Type) (tags []byte) { - tagStr := structType.Field(0).Tag.Get("sshtype") - - for _, tag := range strings.Split(tagStr, "|") { - i, err := strconv.Atoi(tag) - if err == nil { - tags = append(tags, byte(i)) - } - } - - return tags -} - -func fieldError(t reflect.Type, field int, problem string) error { - if problem != "" { - problem = ": " + problem - } - return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem) -} - -var errShortRead = errors.New("ssh: short read") - -// Unmarshal parses data in SSH wire format into a structure. The out -// argument should be a pointer to struct. If the first member of the -// struct has the "sshtype" tag set to a '|'-separated set of numbers -// in decimal, the packet must start with one of those numbers. In -// case of error, Unmarshal returns a ParseError or -// UnexpectedMessageError. 
-func Unmarshal(data []byte, out interface{}) error { - v := reflect.ValueOf(out).Elem() - structType := v.Type() - expectedTypes := typeTags(structType) - - var expectedType byte - if len(expectedTypes) > 0 { - expectedType = expectedTypes[0] - } - - if len(data) == 0 { - return parseError(expectedType) - } - - if len(expectedTypes) > 0 { - goodType := false - for _, e := range expectedTypes { - if e > 0 && data[0] == e { - goodType = true - break - } - } - if !goodType { - return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes) - } - data = data[1:] - } - - var ok bool - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - t := field.Type() - switch t.Kind() { - case reflect.Bool: - if len(data) < 1 { - return errShortRead - } - field.SetBool(data[0] != 0) - data = data[1:] - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - return fieldError(structType, i, "array of unsupported type") - } - if len(data) < t.Len() { - return errShortRead - } - for j, n := 0, t.Len(); j < n; j++ { - field.Index(j).Set(reflect.ValueOf(data[j])) - } - data = data[t.Len():] - case reflect.Uint64: - var u64 uint64 - if u64, data, ok = parseUint64(data); !ok { - return errShortRead - } - field.SetUint(u64) - case reflect.Uint32: - var u32 uint32 - if u32, data, ok = parseUint32(data); !ok { - return errShortRead - } - field.SetUint(uint64(u32)) - case reflect.Uint8: - if len(data) < 1 { - return errShortRead - } - field.SetUint(uint64(data[0])) - data = data[1:] - case reflect.String: - var s []byte - if s, data, ok = parseString(data); !ok { - return fieldError(structType, i, "") - } - field.SetString(string(s)) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if structType.Field(i).Tag.Get("ssh") == "rest" { - field.Set(reflect.ValueOf(data)) - data = nil - } else { - var s []byte - if s, data, ok = parseString(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(s)) - } - case reflect.String: - var nl []string - if nl, data, ok = parseNameList(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(nl)) - default: - return fieldError(structType, i, "slice of unsupported type") - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - if n, data, ok = parseInt(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(n)) - } else { - return fieldError(structType, i, "pointer to unsupported type") - } - default: - return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t)) - } - } - - if len(data) != 0 { - return parseError(expectedType) - } - - return nil -} - -// Marshal serializes the message in msg to SSH wire format. The msg -// argument should be a struct or pointer to struct. If the first -// member has the "sshtype" tag set to a number in decimal, that -// number is prepended to the result. If the last of member has the -// "ssh" tag set to "rest", its contents are appended to the output. 
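The Unmarshal and Marshal doc comments above describe the exported wire-format reflection API and its struct tags; a small sketch of those conventions (an "sshtype" tag on the first field, ssh:"rest" on the last), using a hypothetical message type and an arbitrary type byte:

    // Sketch only: round-trip a struct through SSH wire format.
    package main

    import (
        "fmt"
        "log"

        "golang.org/x/crypto/ssh"
    )

    // demoMsg is hypothetical; 42 is an arbitrary message type byte.
    type demoMsg struct {
        Name  string `sshtype:"42"`
        Flag  bool
        Extra []byte `ssh:"rest"`
    }

    func main() {
        in := demoMsg{Name: "example", Flag: true, Extra: []byte{1, 2, 3}}
        wire := ssh.Marshal(&in) // first byte is 42, then each field in order

        var out demoMsg
        if err := ssh.Unmarshal(wire, &out); err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%+v\n", out)
    }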
-func Marshal(msg interface{}) []byte { - out := make([]byte, 0, 64) - return marshalStruct(out, msg) -} - -func marshalStruct(out []byte, msg interface{}) []byte { - v := reflect.Indirect(reflect.ValueOf(msg)) - msgTypes := typeTags(v.Type()) - if len(msgTypes) > 0 { - out = append(out, msgTypes[0]) - } - - for i, n := 0, v.NumField(); i < n; i++ { - field := v.Field(i) - switch t := field.Type(); t.Kind() { - case reflect.Bool: - var v uint8 - if field.Bool() { - v = 1 - } - out = append(out, v) - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface())) - } - for j, l := 0, t.Len(); j < l; j++ { - out = append(out, uint8(field.Index(j).Uint())) - } - case reflect.Uint32: - out = appendU32(out, uint32(field.Uint())) - case reflect.Uint64: - out = appendU64(out, uint64(field.Uint())) - case reflect.Uint8: - out = append(out, uint8(field.Uint())) - case reflect.String: - s := field.String() - out = appendInt(out, len(s)) - out = append(out, s...) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if v.Type().Field(i).Tag.Get("ssh") != "rest" { - out = appendInt(out, field.Len()) - } - out = append(out, field.Bytes()...) - case reflect.String: - offset := len(out) - out = appendU32(out, 0) - if n := field.Len(); n > 0 { - for j := 0; j < n; j++ { - f := field.Index(j) - if j != 0 { - out = append(out, ',') - } - out = append(out, f.String()...) - } - // overwrite length value - binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4)) - } - default: - panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface())) - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - nValue := reflect.ValueOf(&n) - nValue.Elem().Set(field) - needed := intLength(n) - oldLength := len(out) - - if cap(out)-len(out) < needed { - newOut := make([]byte, len(out), 2*(len(out)+needed)) - copy(newOut, out) - out = newOut - } - out = out[:oldLength+needed] - marshalInt(out[oldLength:], n) - } else { - panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface())) - } - } - } - - return out -} - -var bigOne = big.NewInt(1) - -func parseString(in []byte) (out, rest []byte, ok bool) { - if len(in) < 4 { - return - } - length := binary.BigEndian.Uint32(in) - in = in[4:] - if uint32(len(in)) < length { - return - } - out = in[:length] - rest = in[length:] - ok = true - return -} - -var ( - comma = []byte{','} - emptyNameList = []string{} -) - -func parseNameList(in []byte) (out []string, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - if len(contents) == 0 { - out = emptyNameList - return - } - parts := bytes.Split(contents, comma) - out = make([]string, len(parts)) - for i, part := range parts { - out[i] = string(part) - } - return -} - -func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - out = new(big.Int) - - if len(contents) > 0 && contents[0]&0x80 == 0x80 { - // This is a negative number - notBytes := make([]byte, len(contents)) - for i := range notBytes { - notBytes[i] = ^contents[i] - } - out.SetBytes(notBytes) - out.Add(out, bigOne) - out.Neg(out) - } else { - // Positive number - out.SetBytes(contents) - } - ok = true - return -} - -func parseUint32(in []byte) (uint32, []byte, bool) { - if len(in) < 4 { - return 0, nil, false - } - return binary.BigEndian.Uint32(in), in[4:], true -} - -func parseUint64(in []byte) (uint64, []byte, 
bool) { - if len(in) < 8 { - return 0, nil, false - } - return binary.BigEndian.Uint64(in), in[8:], true -} - -func intLength(n *big.Int) int { - length := 4 /* length bytes */ - if n.Sign() < 0 { - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bitLen := nMinus1.BitLen() - if bitLen%8 == 0 { - // The number will need 0xff padding - length++ - } - length += (bitLen + 7) / 8 - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bitLen := n.BitLen() - if bitLen%8 == 0 { - // The number will need 0x00 padding - length++ - } - length += (bitLen + 7) / 8 - } - - return length -} - -func marshalUint32(to []byte, n uint32) []byte { - binary.BigEndian.PutUint32(to, n) - return to[4:] -} - -func marshalUint64(to []byte, n uint64) []byte { - binary.BigEndian.PutUint64(to, n) - return to[8:] -} - -func marshalInt(to []byte, n *big.Int) []byte { - lengthBytes := to - to = to[4:] - length := 0 - - if n.Sign() < 0 { - // A negative number has to be converted to two's-complement - // form. So we'll subtract 1 and invert. If the - // most-significant-bit isn't set then we'll need to pad the - // beginning with 0xff in order to keep the number negative. - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bytes := nMinus1.Bytes() - for i := range bytes { - bytes[i] ^= 0xff - } - if len(bytes) == 0 || bytes[0]&0x80 == 0 { - to[0] = 0xff - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bytes := n.Bytes() - if len(bytes) > 0 && bytes[0]&0x80 != 0 { - // We'll have to pad this with a 0x00 in order to - // stop it looking like a negative number. - to[0] = 0 - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } - - lengthBytes[0] = byte(length >> 24) - lengthBytes[1] = byte(length >> 16) - lengthBytes[2] = byte(length >> 8) - lengthBytes[3] = byte(length) - return to -} - -func writeInt(w io.Writer, n *big.Int) { - length := intLength(n) - buf := make([]byte, length) - marshalInt(buf, n) - w.Write(buf) -} - -func writeString(w io.Writer, s []byte) { - var lengthBytes [4]byte - lengthBytes[0] = byte(len(s) >> 24) - lengthBytes[1] = byte(len(s) >> 16) - lengthBytes[2] = byte(len(s) >> 8) - lengthBytes[3] = byte(len(s)) - w.Write(lengthBytes[:]) - w.Write(s) -} - -func stringLength(n int) int { - return 4 + n -} - -func marshalString(to []byte, s []byte) []byte { - to[0] = byte(len(s) >> 24) - to[1] = byte(len(s) >> 16) - to[2] = byte(len(s) >> 8) - to[3] = byte(len(s)) - to = to[4:] - copy(to, s) - return to[len(s):] -} - -var bigIntType = reflect.TypeOf((*big.Int)(nil)) - -// Decode a packet into its corresponding message. 
-func decode(packet []byte) (interface{}, error) { - var msg interface{} - switch packet[0] { - case msgDisconnect: - msg = new(disconnectMsg) - case msgServiceRequest: - msg = new(serviceRequestMsg) - case msgServiceAccept: - msg = new(serviceAcceptMsg) - case msgKexInit: - msg = new(kexInitMsg) - case msgKexDHInit: - msg = new(kexDHInitMsg) - case msgKexDHReply: - msg = new(kexDHReplyMsg) - case msgUserAuthRequest: - msg = new(userAuthRequestMsg) - case msgUserAuthSuccess: - return new(userAuthSuccessMsg), nil - case msgUserAuthFailure: - msg = new(userAuthFailureMsg) - case msgUserAuthPubKeyOk: - msg = new(userAuthPubKeyOkMsg) - case msgGlobalRequest: - msg = new(globalRequestMsg) - case msgRequestSuccess: - msg = new(globalRequestSuccessMsg) - case msgRequestFailure: - msg = new(globalRequestFailureMsg) - case msgChannelOpen: - msg = new(channelOpenMsg) - case msgChannelData: - msg = new(channelDataMsg) - case msgChannelOpenConfirm: - msg = new(channelOpenConfirmMsg) - case msgChannelOpenFailure: - msg = new(channelOpenFailureMsg) - case msgChannelWindowAdjust: - msg = new(windowAdjustMsg) - case msgChannelEOF: - msg = new(channelEOFMsg) - case msgChannelClose: - msg = new(channelCloseMsg) - case msgChannelRequest: - msg = new(channelRequestMsg) - case msgChannelSuccess: - msg = new(channelRequestSuccessMsg) - case msgChannelFailure: - msg = new(channelRequestFailureMsg) - default: - return nil, unexpectedMessageError(0, packet[0]) - } - if err := Unmarshal(packet, msg); err != nil { - return nil, err - } - return msg, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/messages_test.go b/vendor/golang.org/x/crypto/ssh/messages_test.go deleted file mode 100644 index e7907641..00000000 --- a/vendor/golang.org/x/crypto/ssh/messages_test.go +++ /dev/null @@ -1,288 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ssh - -import ( - "bytes" - "math/big" - "math/rand" - "reflect" - "testing" - "testing/quick" -) - -var intLengthTests = []struct { - val, length int -}{ - {0, 4 + 0}, - {1, 4 + 1}, - {127, 4 + 1}, - {128, 4 + 2}, - {-1, 4 + 1}, -} - -func TestIntLength(t *testing.T) { - for _, test := range intLengthTests { - v := new(big.Int).SetInt64(int64(test.val)) - length := intLength(v) - if length != test.length { - t.Errorf("For %d, got length %d but expected %d", test.val, length, test.length) - } - } -} - -type msgAllTypes struct { - Bool bool `sshtype:"21"` - Array [16]byte - Uint64 uint64 - Uint32 uint32 - Uint8 uint8 - String string - Strings []string - Bytes []byte - Int *big.Int - Rest []byte `ssh:"rest"` -} - -func (t *msgAllTypes) Generate(rand *rand.Rand, size int) reflect.Value { - m := &msgAllTypes{} - m.Bool = rand.Intn(2) == 1 - randomBytes(m.Array[:], rand) - m.Uint64 = uint64(rand.Int63n(1<<63 - 1)) - m.Uint32 = uint32(rand.Intn((1 << 31) - 1)) - m.Uint8 = uint8(rand.Intn(1 << 8)) - m.String = string(m.Array[:]) - m.Strings = randomNameList(rand) - m.Bytes = m.Array[:] - m.Int = randomInt(rand) - m.Rest = m.Array[:] - return reflect.ValueOf(m) -} - -func TestMarshalUnmarshal(t *testing.T) { - rand := rand.New(rand.NewSource(0)) - iface := &msgAllTypes{} - ty := reflect.ValueOf(iface).Type() - - n := 100 - if testing.Short() { - n = 5 - } - for j := 0; j < n; j++ { - v, ok := quick.Value(ty, rand) - if !ok { - t.Errorf("failed to create value") - break - } - - m1 := v.Elem().Interface() - m2 := iface - - marshaled := Marshal(m1) - if err := Unmarshal(marshaled, m2); err != nil { - t.Errorf("Unmarshal %#v: %s", m1, err) - break - } - - if !reflect.DeepEqual(v.Interface(), m2) { - t.Errorf("got: %#v\nwant:%#v\n%x", m2, m1, marshaled) - break - } - } -} - -func TestUnmarshalEmptyPacket(t *testing.T) { - var b []byte - var m channelRequestSuccessMsg - if err := Unmarshal(b, &m); err == nil { - t.Fatalf("unmarshal of empty slice succeeded") - } -} - -func TestUnmarshalUnexpectedPacket(t *testing.T) { - type S struct { - I uint32 `sshtype:"43"` - S string - B bool - } - - s := S{11, "hello", true} - packet := Marshal(s) - packet[0] = 42 - roundtrip := S{} - err := Unmarshal(packet, &roundtrip) - if err == nil { - t.Fatal("expected error, not nil") - } -} - -func TestMarshalPtr(t *testing.T) { - s := struct { - S string - }{"hello"} - - m1 := Marshal(s) - m2 := Marshal(&s) - if !bytes.Equal(m1, m2) { - t.Errorf("got %q, want %q for marshaled pointer", m2, m1) - } -} - -func TestBareMarshalUnmarshal(t *testing.T) { - type S struct { - I uint32 - S string - B bool - } - - s := S{42, "hello", true} - packet := Marshal(s) - roundtrip := S{} - Unmarshal(packet, &roundtrip) - - if !reflect.DeepEqual(s, roundtrip) { - t.Errorf("got %#v, want %#v", roundtrip, s) - } -} - -func TestBareMarshal(t *testing.T) { - type S2 struct { - I uint32 - } - s := S2{42} - packet := Marshal(s) - i, rest, ok := parseUint32(packet) - if len(rest) > 0 || !ok { - t.Errorf("parseInt(%q): parse error", packet) - } - if i != s.I { - t.Errorf("got %d, want %d", i, s.I) - } -} - -func TestUnmarshalShortKexInitPacket(t *testing.T) { - // This used to panic. 
- // Issue 11348 - packet := []byte{0x14, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0xff, 0xff, 0xff, 0xff} - kim := &kexInitMsg{} - if err := Unmarshal(packet, kim); err == nil { - t.Error("truncated packet unmarshaled without error") - } -} - -func TestMarshalMultiTag(t *testing.T) { - var res struct { - A uint32 `sshtype:"1|2"` - } - - good1 := struct { - A uint32 `sshtype:"1"` - }{ - 1, - } - good2 := struct { - A uint32 `sshtype:"2"` - }{ - 1, - } - - if e := Unmarshal(Marshal(good1), &res); e != nil { - t.Errorf("error unmarshaling multipart tag: %v", e) - } - - if e := Unmarshal(Marshal(good2), &res); e != nil { - t.Errorf("error unmarshaling multipart tag: %v", e) - } - - bad1 := struct { - A uint32 `sshtype:"3"` - }{ - 1, - } - if e := Unmarshal(Marshal(bad1), &res); e == nil { - t.Errorf("bad struct unmarshaled without error") - } -} - -func randomBytes(out []byte, rand *rand.Rand) { - for i := 0; i < len(out); i++ { - out[i] = byte(rand.Int31()) - } -} - -func randomNameList(rand *rand.Rand) []string { - ret := make([]string, rand.Int31()&15) - for i := range ret { - s := make([]byte, 1+(rand.Int31()&15)) - for j := range s { - s[j] = 'a' + uint8(rand.Int31()&15) - } - ret[i] = string(s) - } - return ret -} - -func randomInt(rand *rand.Rand) *big.Int { - return new(big.Int).SetInt64(int64(int32(rand.Uint32()))) -} - -func (*kexInitMsg) Generate(rand *rand.Rand, size int) reflect.Value { - ki := &kexInitMsg{} - randomBytes(ki.Cookie[:], rand) - ki.KexAlgos = randomNameList(rand) - ki.ServerHostKeyAlgos = randomNameList(rand) - ki.CiphersClientServer = randomNameList(rand) - ki.CiphersServerClient = randomNameList(rand) - ki.MACsClientServer = randomNameList(rand) - ki.MACsServerClient = randomNameList(rand) - ki.CompressionClientServer = randomNameList(rand) - ki.CompressionServerClient = randomNameList(rand) - ki.LanguagesClientServer = randomNameList(rand) - ki.LanguagesServerClient = randomNameList(rand) - if rand.Int31()&1 == 1 { - ki.FirstKexFollows = true - } - return reflect.ValueOf(ki) -} - -func (*kexDHInitMsg) Generate(rand *rand.Rand, size int) reflect.Value { - dhi := &kexDHInitMsg{} - dhi.X = randomInt(rand) - return reflect.ValueOf(dhi) -} - -var ( - _kexInitMsg = new(kexInitMsg).Generate(rand.New(rand.NewSource(0)), 10).Elem().Interface() - _kexDHInitMsg = new(kexDHInitMsg).Generate(rand.New(rand.NewSource(0)), 10).Elem().Interface() - - _kexInit = Marshal(_kexInitMsg) - _kexDHInit = Marshal(_kexDHInitMsg) -) - -func BenchmarkMarshalKexInitMsg(b *testing.B) { - for i := 0; i < b.N; i++ { - Marshal(_kexInitMsg) - } -} - -func BenchmarkUnmarshalKexInitMsg(b *testing.B) { - m := new(kexInitMsg) - for i := 0; i < b.N; i++ { - Unmarshal(_kexInit, m) - } -} - -func BenchmarkMarshalKexDHInitMsg(b *testing.B) { - for i := 0; i < b.N; i++ { - Marshal(_kexDHInitMsg) - } -} - -func BenchmarkUnmarshalKexDHInitMsg(b *testing.B) { - m := new(kexDHInitMsg) - for i := 0; i < b.N; i++ { - Unmarshal(_kexDHInit, m) - } -} diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go deleted file mode 100644 index f1901627..00000000 --- a/vendor/golang.org/x/crypto/ssh/mux.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ssh - -import ( - "encoding/binary" - "fmt" - "io" - "log" - "sync" - "sync/atomic" -) - -// debugMux, if set, causes messages in the connection protocol to be -// logged. -const debugMux = false - -// chanList is a thread safe channel list. -type chanList struct { - // protects concurrent access to chans - sync.Mutex - - // chans are indexed by the local id of the channel, which the - // other side should send in the PeersId field. - chans []*channel - - // This is a debugging aid: it offsets all IDs by this - // amount. This helps distinguish otherwise identical - // server/client muxes - offset uint32 -} - -// Assigns a channel ID to the given channel. -func (c *chanList) add(ch *channel) uint32 { - c.Lock() - defer c.Unlock() - for i := range c.chans { - if c.chans[i] == nil { - c.chans[i] = ch - return uint32(i) + c.offset - } - } - c.chans = append(c.chans, ch) - return uint32(len(c.chans)-1) + c.offset -} - -// getChan returns the channel for the given ID. -func (c *chanList) getChan(id uint32) *channel { - id -= c.offset - - c.Lock() - defer c.Unlock() - if id < uint32(len(c.chans)) { - return c.chans[id] - } - return nil -} - -func (c *chanList) remove(id uint32) { - id -= c.offset - c.Lock() - if id < uint32(len(c.chans)) { - c.chans[id] = nil - } - c.Unlock() -} - -// dropAll forgets all channels it knows, returning them in a slice. -func (c *chanList) dropAll() []*channel { - c.Lock() - defer c.Unlock() - var r []*channel - - for _, ch := range c.chans { - if ch == nil { - continue - } - r = append(r, ch) - } - c.chans = nil - return r -} - -// mux represents the state for the SSH connection protocol, which -// multiplexes many channels onto a single packet transport. -type mux struct { - conn packetConn - chanList chanList - - incomingChannels chan NewChannel - - globalSentMu sync.Mutex - globalResponses chan interface{} - incomingRequests chan *Request - - errCond *sync.Cond - err error -} - -// When debugging, each new chanList instantiation has a different -// offset. -var globalOff uint32 - -func (m *mux) Wait() error { - m.errCond.L.Lock() - defer m.errCond.L.Unlock() - for m.err == nil { - m.errCond.Wait() - } - return m.err -} - -// newMux returns a mux that runs over the given connection. 
-func newMux(p packetConn) *mux { - m := &mux{ - conn: p, - incomingChannels: make(chan NewChannel, chanSize), - globalResponses: make(chan interface{}, 1), - incomingRequests: make(chan *Request, chanSize), - errCond: newCond(), - } - if debugMux { - m.chanList.offset = atomic.AddUint32(&globalOff, 1) - } - - go m.loop() - return m -} - -func (m *mux) sendMessage(msg interface{}) error { - p := Marshal(msg) - if debugMux { - log.Printf("send global(%d): %#v", m.chanList.offset, msg) - } - return m.conn.writePacket(p) -} - -func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) { - if wantReply { - m.globalSentMu.Lock() - defer m.globalSentMu.Unlock() - } - - if err := m.sendMessage(globalRequestMsg{ - Type: name, - WantReply: wantReply, - Data: payload, - }); err != nil { - return false, nil, err - } - - if !wantReply { - return false, nil, nil - } - - msg, ok := <-m.globalResponses - if !ok { - return false, nil, io.EOF - } - switch msg := msg.(type) { - case *globalRequestFailureMsg: - return false, msg.Data, nil - case *globalRequestSuccessMsg: - return true, msg.Data, nil - default: - return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg) - } -} - -// ackRequest must be called after processing a global request that -// has WantReply set. -func (m *mux) ackRequest(ok bool, data []byte) error { - if ok { - return m.sendMessage(globalRequestSuccessMsg{Data: data}) - } - return m.sendMessage(globalRequestFailureMsg{Data: data}) -} - -func (m *mux) Close() error { - return m.conn.Close() -} - -// loop runs the connection machine. It will process packets until an -// error is encountered. To synchronize on loop exit, use mux.Wait. -func (m *mux) loop() { - var err error - for err == nil { - err = m.onePacket() - } - - for _, ch := range m.chanList.dropAll() { - ch.close() - } - - close(m.incomingChannels) - close(m.incomingRequests) - close(m.globalResponses) - - m.conn.Close() - - m.errCond.L.Lock() - m.err = err - m.errCond.Broadcast() - m.errCond.L.Unlock() - - if debugMux { - log.Println("loop exit", err) - } -} - -// onePacket reads and processes one packet. -func (m *mux) onePacket() error { - packet, err := m.conn.readPacket() - if err != nil { - return err - } - - if debugMux { - if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData { - log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet)) - } else { - p, _ := decode(packet) - log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet)) - } - } - - switch packet[0] { - case msgChannelOpen: - return m.handleChannelOpen(packet) - case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: - return m.handleGlobalPacket(packet) - } - - // assume a channel packet. 
- if len(packet) < 5 { - return parseError(packet[0]) - } - id := binary.BigEndian.Uint32(packet[1:]) - ch := m.chanList.getChan(id) - if ch == nil { - return fmt.Errorf("ssh: invalid channel %d", id) - } - - return ch.handlePacket(packet) -} - -func (m *mux) handleGlobalPacket(packet []byte) error { - msg, err := decode(packet) - if err != nil { - return err - } - - switch msg := msg.(type) { - case *globalRequestMsg: - m.incomingRequests <- &Request{ - Type: msg.Type, - WantReply: msg.WantReply, - Payload: msg.Data, - mux: m, - } - case *globalRequestSuccessMsg, *globalRequestFailureMsg: - m.globalResponses <- msg - default: - panic(fmt.Sprintf("not a global message %#v", msg)) - } - - return nil -} - -// handleChannelOpen schedules a channel to be Accept()ed. -func (m *mux) handleChannelOpen(packet []byte) error { - var msg channelOpenMsg - if err := Unmarshal(packet, &msg); err != nil { - return err - } - - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - failMsg := channelOpenFailureMsg{ - PeersID: msg.PeersID, - Reason: ConnectionFailed, - Message: "invalid request", - Language: "en_US.UTF-8", - } - return m.sendMessage(failMsg) - } - - c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData) - c.remoteId = msg.PeersID - c.maxRemotePayload = msg.MaxPacketSize - c.remoteWin.add(msg.PeersWindow) - m.incomingChannels <- c - return nil -} - -func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) { - ch, err := m.openChannel(chanType, extra) - if err != nil { - return nil, nil, err - } - - return ch, ch.incomingRequests, nil -} - -func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { - ch := m.newChannel(chanType, channelOutbound, extra) - - ch.maxIncomingPayload = channelMaxPacket - - open := channelOpenMsg{ - ChanType: chanType, - PeersWindow: ch.myWindow, - MaxPacketSize: ch.maxIncomingPayload, - TypeSpecificData: extra, - PeersID: ch.localId, - } - if err := m.sendMessage(open); err != nil { - return nil, err - } - - switch msg := (<-ch.msg).(type) { - case *channelOpenConfirmMsg: - return ch, nil - case *channelOpenFailureMsg: - return nil, &OpenChannelError{msg.Reason, msg.Message} - default: - return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) - } -} diff --git a/vendor/golang.org/x/crypto/ssh/mux_test.go b/vendor/golang.org/x/crypto/ssh/mux_test.go deleted file mode 100644 index d88b64e4..00000000 --- a/vendor/golang.org/x/crypto/ssh/mux_test.go +++ /dev/null @@ -1,501 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "io" - "io/ioutil" - "sync" - "testing" -) - -func muxPair() (*mux, *mux) { - a, b := memPipe() - - s := newMux(a) - c := newMux(b) - - return s, c -} - -// Returns both ends of a channel, and the mux for the the 2nd -// channel. 
-func channelPair(t *testing.T) (*channel, *channel, *mux) { - c, s := muxPair() - - res := make(chan *channel, 1) - go func() { - newCh, ok := <-s.incomingChannels - if !ok { - t.Fatalf("No incoming channel") - } - if newCh.ChannelType() != "chan" { - t.Fatalf("got type %q want chan", newCh.ChannelType()) - } - ch, _, err := newCh.Accept() - if err != nil { - t.Fatalf("Accept %v", err) - } - res <- ch.(*channel) - }() - - ch, err := c.openChannel("chan", nil) - if err != nil { - t.Fatalf("OpenChannel: %v", err) - } - - return <-res, ch, c -} - -// Test that stderr and stdout can be addressed from different -// goroutines. This is intended for use with the race detector. -func TestMuxChannelExtendedThreadSafety(t *testing.T) { - writer, reader, mux := channelPair(t) - defer writer.Close() - defer reader.Close() - defer mux.Close() - - var wr, rd sync.WaitGroup - magic := "hello world" - - wr.Add(2) - go func() { - io.WriteString(writer, magic) - wr.Done() - }() - go func() { - io.WriteString(writer.Stderr(), magic) - wr.Done() - }() - - rd.Add(2) - go func() { - c, err := ioutil.ReadAll(reader) - if string(c) != magic { - t.Fatalf("stdout read got %q, want %q (error %s)", c, magic, err) - } - rd.Done() - }() - go func() { - c, err := ioutil.ReadAll(reader.Stderr()) - if string(c) != magic { - t.Fatalf("stderr read got %q, want %q (error %s)", c, magic, err) - } - rd.Done() - }() - - wr.Wait() - writer.CloseWrite() - rd.Wait() -} - -func TestMuxReadWrite(t *testing.T) { - s, c, mux := channelPair(t) - defer s.Close() - defer c.Close() - defer mux.Close() - - magic := "hello world" - magicExt := "hello stderr" - go func() { - _, err := s.Write([]byte(magic)) - if err != nil { - t.Fatalf("Write: %v", err) - } - _, err = s.Extended(1).Write([]byte(magicExt)) - if err != nil { - t.Fatalf("Write: %v", err) - } - }() - - var buf [1024]byte - n, err := c.Read(buf[:]) - if err != nil { - t.Fatalf("server Read: %v", err) - } - got := string(buf[:n]) - if got != magic { - t.Fatalf("server: got %q want %q", got, magic) - } - - n, err = c.Extended(1).Read(buf[:]) - if err != nil { - t.Fatalf("server Read: %v", err) - } - - got = string(buf[:n]) - if got != magicExt { - t.Fatalf("server: got %q want %q", got, magic) - } -} - -func TestMuxChannelOverflow(t *testing.T) { - reader, writer, mux := channelPair(t) - defer reader.Close() - defer writer.Close() - defer mux.Close() - - wDone := make(chan int, 1) - go func() { - if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil { - t.Errorf("could not fill window: %v", err) - } - writer.Write(make([]byte, 1)) - wDone <- 1 - }() - writer.remoteWin.waitWriterBlocked() - - // Send 1 byte. 
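For reference, the hand-built packet that follows uses the SSH_MSG_CHANNEL_DATA wire layout from RFC 4254 section 5.2; a short annotation of the sizing, since the 1+4+4+1 constant is otherwise opaque:

// byte      SSH_MSG_CHANNEL_DATA (94; the package's msgChannelData constant)
// uint32    recipient channel id
// string    data (a uint32 length prefix followed by the payload bytes)
//
// So make([]byte, 1+4+4+1) reserves one type byte, four bytes of channel id,
// four bytes of length, and a single byte of payload.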
- packet := make([]byte, 1+4+4+1) - packet[0] = msgChannelData - marshalUint32(packet[1:], writer.remoteId) - marshalUint32(packet[5:], uint32(1)) - packet[9] = 42 - - if err := writer.mux.conn.writePacket(packet); err != nil { - t.Errorf("could not send packet") - } - if _, err := reader.SendRequest("hello", true, nil); err == nil { - t.Errorf("SendRequest succeeded.") - } - <-wDone -} - -func TestMuxChannelCloseWriteUnblock(t *testing.T) { - reader, writer, mux := channelPair(t) - defer reader.Close() - defer writer.Close() - defer mux.Close() - - wDone := make(chan int, 1) - go func() { - if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil { - t.Errorf("could not fill window: %v", err) - } - if _, err := writer.Write(make([]byte, 1)); err != io.EOF { - t.Errorf("got %v, want EOF for unblock write", err) - } - wDone <- 1 - }() - - writer.remoteWin.waitWriterBlocked() - reader.Close() - <-wDone -} - -func TestMuxConnectionCloseWriteUnblock(t *testing.T) { - reader, writer, mux := channelPair(t) - defer reader.Close() - defer writer.Close() - defer mux.Close() - - wDone := make(chan int, 1) - go func() { - if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil { - t.Errorf("could not fill window: %v", err) - } - if _, err := writer.Write(make([]byte, 1)); err != io.EOF { - t.Errorf("got %v, want EOF for unblock write", err) - } - wDone <- 1 - }() - - writer.remoteWin.waitWriterBlocked() - mux.Close() - <-wDone -} - -func TestMuxReject(t *testing.T) { - client, server := muxPair() - defer server.Close() - defer client.Close() - - go func() { - ch, ok := <-server.incomingChannels - if !ok { - t.Fatalf("Accept") - } - if ch.ChannelType() != "ch" || string(ch.ExtraData()) != "extra" { - t.Fatalf("unexpected channel: %q, %q", ch.ChannelType(), ch.ExtraData()) - } - ch.Reject(RejectionReason(42), "message") - }() - - ch, err := client.openChannel("ch", []byte("extra")) - if ch != nil { - t.Fatal("openChannel not rejected") - } - - ocf, ok := err.(*OpenChannelError) - if !ok { - t.Errorf("got %#v want *OpenChannelError", err) - } else if ocf.Reason != 42 || ocf.Message != "message" { - t.Errorf("got %#v, want {Reason: 42, Message: %q}", ocf, "message") - } - - want := "ssh: rejected: unknown reason 42 (message)" - if err.Error() != want { - t.Errorf("got %q, want %q", err.Error(), want) - } -} - -func TestMuxChannelRequest(t *testing.T) { - client, server, mux := channelPair(t) - defer server.Close() - defer client.Close() - defer mux.Close() - - var received int - var wg sync.WaitGroup - wg.Add(1) - go func() { - for r := range server.incomingRequests { - received++ - r.Reply(r.Type == "yes", nil) - } - wg.Done() - }() - _, err := client.SendRequest("yes", false, nil) - if err != nil { - t.Fatalf("SendRequest: %v", err) - } - ok, err := client.SendRequest("yes", true, nil) - if err != nil { - t.Fatalf("SendRequest: %v", err) - } - - if !ok { - t.Errorf("SendRequest(yes): %v", ok) - - } - - ok, err = client.SendRequest("no", true, nil) - if err != nil { - t.Fatalf("SendRequest: %v", err) - } - if ok { - t.Errorf("SendRequest(no): %v", ok) - - } - - client.Close() - wg.Wait() - - if received != 3 { - t.Errorf("got %d requests, want %d", received, 3) - } -} - -func TestMuxGlobalRequest(t *testing.T) { - clientMux, serverMux := muxPair() - defer serverMux.Close() - defer clientMux.Close() - - var seen bool - go func() { - for r := range serverMux.incomingRequests { - seen = seen || r.Type == "peek" - if r.WantReply { - err := r.Reply(r.Type == "yes", - 
append([]byte(r.Type), r.Payload...)) - if err != nil { - t.Errorf("AckRequest: %v", err) - } - } - } - }() - - _, _, err := clientMux.SendRequest("peek", false, nil) - if err != nil { - t.Errorf("SendRequest: %v", err) - } - - ok, data, err := clientMux.SendRequest("yes", true, []byte("a")) - if !ok || string(data) != "yesa" || err != nil { - t.Errorf("SendRequest(\"yes\", true, \"a\"): %v %v %v", - ok, data, err) - } - if ok, data, err := clientMux.SendRequest("yes", true, []byte("a")); !ok || string(data) != "yesa" || err != nil { - t.Errorf("SendRequest(\"yes\", true, \"a\"): %v %v %v", - ok, data, err) - } - - if ok, data, err := clientMux.SendRequest("no", true, []byte("a")); ok || string(data) != "noa" || err != nil { - t.Errorf("SendRequest(\"no\", true, \"a\"): %v %v %v", - ok, data, err) - } - - if !seen { - t.Errorf("never saw 'peek' request") - } -} - -func TestMuxGlobalRequestUnblock(t *testing.T) { - clientMux, serverMux := muxPair() - defer serverMux.Close() - defer clientMux.Close() - - result := make(chan error, 1) - go func() { - _, _, err := clientMux.SendRequest("hello", true, nil) - result <- err - }() - - <-serverMux.incomingRequests - serverMux.conn.Close() - err := <-result - - if err != io.EOF { - t.Errorf("want EOF, got %v", io.EOF) - } -} - -func TestMuxChannelRequestUnblock(t *testing.T) { - a, b, connB := channelPair(t) - defer a.Close() - defer b.Close() - defer connB.Close() - - result := make(chan error, 1) - go func() { - _, err := a.SendRequest("hello", true, nil) - result <- err - }() - - <-b.incomingRequests - connB.conn.Close() - err := <-result - - if err != io.EOF { - t.Errorf("want EOF, got %v", err) - } -} - -func TestMuxCloseChannel(t *testing.T) { - r, w, mux := channelPair(t) - defer mux.Close() - defer r.Close() - defer w.Close() - - result := make(chan error, 1) - go func() { - var b [1024]byte - _, err := r.Read(b[:]) - result <- err - }() - if err := w.Close(); err != nil { - t.Errorf("w.Close: %v", err) - } - - if _, err := w.Write([]byte("hello")); err != io.EOF { - t.Errorf("got err %v, want io.EOF after Close", err) - } - - if err := <-result; err != io.EOF { - t.Errorf("got %v (%T), want io.EOF", err, err) - } -} - -func TestMuxCloseWriteChannel(t *testing.T) { - r, w, mux := channelPair(t) - defer mux.Close() - - result := make(chan error, 1) - go func() { - var b [1024]byte - _, err := r.Read(b[:]) - result <- err - }() - if err := w.CloseWrite(); err != nil { - t.Errorf("w.CloseWrite: %v", err) - } - - if _, err := w.Write([]byte("hello")); err != io.EOF { - t.Errorf("got err %v, want io.EOF after CloseWrite", err) - } - - if err := <-result; err != io.EOF { - t.Errorf("got %v (%T), want io.EOF", err, err) - } -} - -func TestMuxInvalidRecord(t *testing.T) { - a, b := muxPair() - defer a.Close() - defer b.Close() - - packet := make([]byte, 1+4+4+1) - packet[0] = msgChannelData - marshalUint32(packet[1:], 29348723 /* invalid channel id */) - marshalUint32(packet[5:], 1) - packet[9] = 42 - - a.conn.writePacket(packet) - go a.SendRequest("hello", false, nil) - // 'a' wrote an invalid packet, so 'b' has exited. - req, ok := <-b.incomingRequests - if ok { - t.Errorf("got request %#v after receiving invalid packet", req) - } -} - -func TestZeroWindowAdjust(t *testing.T) { - a, b, mux := channelPair(t) - defer a.Close() - defer b.Close() - defer mux.Close() - - go func() { - io.WriteString(a, "hello") - // bogus adjust. 
- a.sendMessage(windowAdjustMsg{}) - io.WriteString(a, "world") - a.Close() - }() - - want := "helloworld" - c, _ := ioutil.ReadAll(b) - if string(c) != want { - t.Errorf("got %q want %q", c, want) - } -} - -func TestMuxMaxPacketSize(t *testing.T) { - a, b, mux := channelPair(t) - defer a.Close() - defer b.Close() - defer mux.Close() - - large := make([]byte, a.maxRemotePayload+1) - packet := make([]byte, 1+4+4+1+len(large)) - packet[0] = msgChannelData - marshalUint32(packet[1:], a.remoteId) - marshalUint32(packet[5:], uint32(len(large))) - packet[9] = 42 - - if err := a.mux.conn.writePacket(packet); err != nil { - t.Errorf("could not send packet") - } - - go a.SendRequest("hello", false, nil) - - _, ok := <-b.incomingRequests - if ok { - t.Errorf("connection still alive after receiving large packet.") - } -} - -// Don't ship code with debug=true. -func TestDebug(t *testing.T) { - if debugMux { - t.Error("mux debug switched on") - } - if debugHandshake { - t.Error("handshake debug switched on") - } - if debugTransport { - t.Error("transport debug switched on") - } -} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go deleted file mode 100644 index d0f48253..00000000 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ /dev/null @@ -1,593 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "strings" -) - -// The Permissions type holds fine-grained permissions that are -// specific to a user or a specific authentication method for a user. -// The Permissions value for a successful authentication attempt is -// available in ServerConn, so it can be used to pass information from -// the user-authentication phase to the application layer. -type Permissions struct { - // CriticalOptions indicate restrictions to the default - // permissions, and are typically used in conjunction with - // user certificates. The standard for SSH certificates - // defines "force-command" (only allow the given command to - // execute) and "source-address" (only allow connections from - // the given address). The SSH package currently only enforces - // the "source-address" critical option. It is up to server - // implementations to enforce other critical options, such as - // "force-command", by checking them after the SSH handshake - // is successful. In general, SSH servers should reject - // connections that specify critical options that are unknown - // or not supported. - CriticalOptions map[string]string - - // Extensions are extra functionality that the server may - // offer on authenticated connections. Lack of support for an - // extension does not preclude authenticating a user. Common - // extensions are "permit-agent-forwarding", - // "permit-X11-forwarding". The Go SSH library currently does - // not act on any extension, and it is up to server - // implementations to honor them. Extensions can be used to - // pass data from the authentication callbacks to the server - // application layer. - Extensions map[string]string -} - -// ServerConfig holds server specific configuration data. -type ServerConfig struct { - // Config contains configuration shared between client and server. - Config - - hostKeys []Signer - - // NoClientAuth is true if clients are allowed to connect without - // authenticating. 
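For context, a minimal sketch of how a caller wires these ServerConfig fields up and hands the result to NewServerConn below; the listen address, host-key path, and credentials are illustrative placeholders.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

func main() {
	// NoClientAuth is left false, so the password callback must accept the client.
	config := &ssh.ServerConfig{
		PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
			if c.User() == "demo" && string(pass) == "secret" { // illustrative credentials
				return nil, nil
			}
			return nil, fmt.Errorf("password rejected for %q", c.User())
		},
	}

	keyBytes, err := ioutil.ReadFile("/path/to/host_key") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	hostKey, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		log.Fatal(err)
	}
	config.AddHostKey(hostKey) // at least one host key is required

	ln, err := net.Listen("tcp", "127.0.0.1:2022") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	nConn, err := ln.Accept()
	if err != nil {
		log.Fatal(err)
	}

	// The handshake and authentication loop below run inside NewServerConn.
	_, chans, reqs, err := ssh.NewServerConn(nConn, config)
	if err != nil {
		log.Fatal(err)
	}
	go ssh.DiscardRequests(reqs)
	for newCh := range chans {
		newCh.Reject(ssh.UnknownChannelType, "channels are not handled in this sketch")
	}
}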
- NoClientAuth bool - - // MaxAuthTries specifies the maximum number of authentication attempts - // permitted per connection. If set to a negative number, the number of - // attempts are unlimited. If set to zero, the number of attempts are limited - // to 6. - MaxAuthTries int - - // PasswordCallback, if non-nil, is called when a user - // attempts to authenticate using a password. - PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) - - // PublicKeyCallback, if non-nil, is called when a client - // offers a public key for authentication. It must return a nil error - // if the given public key can be used to authenticate the - // given user. For example, see CertChecker.Authenticate. A - // call to this function does not guarantee that the key - // offered is in fact used to authenticate. To record any data - // depending on the public key, store it inside a - // Permissions.Extensions entry. - PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // KeyboardInteractiveCallback, if non-nil, is called when - // keyboard-interactive authentication is selected (RFC - // 4256). The client object's Challenge function should be - // used to query the user. The callback may offer multiple - // Challenge rounds. To avoid information leaks, the client - // should be presented a challenge even if the user is - // unknown. - KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) - - // AuthLogCallback, if non-nil, is called to log all authentication - // attempts. - AuthLogCallback func(conn ConnMetadata, method string, err error) - - // ServerVersion is the version identification string to announce in - // the public handshake. - // If empty, a reasonable default is used. - // Note that RFC 4253 section 4.2 requires that this string start with - // "SSH-2.0-". - ServerVersion string - - // BannerCallback, if present, is called and the return string is sent to - // the client after key exchange completed but before authentication. - BannerCallback func(conn ConnMetadata) string -} - -// AddHostKey adds a private key as a host key. If an existing host -// key exists with the same algorithm, it is overwritten. Each server -// config must have at least one host key. -func (s *ServerConfig) AddHostKey(key Signer) { - for i, k := range s.hostKeys { - if k.PublicKey().Type() == key.PublicKey().Type() { - s.hostKeys[i] = key - return - } - } - - s.hostKeys = append(s.hostKeys, key) -} - -// cachedPubKey contains the results of querying whether a public key is -// acceptable for a user. -type cachedPubKey struct { - user string - pubKeyData []byte - result error - perms *Permissions -} - -const maxCachedPubKeys = 16 - -// pubKeyCache caches tests for public keys. Since SSH clients -// will query whether a public key is acceptable before attempting to -// authenticate with it, we end up with duplicate queries for public -// key validity. The cache only applies to a single ServerConn. -type pubKeyCache struct { - keys []cachedPubKey -} - -// get returns the result for a given user/algo/key tuple. -func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { - for _, k := range c.keys { - if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) { - return k, true - } - } - return cachedPubKey{}, false -} - -// add adds the given tuple to the cache. 
-func (c *pubKeyCache) add(candidate cachedPubKey) { - if len(c.keys) < maxCachedPubKeys { - c.keys = append(c.keys, candidate) - } -} - -// ServerConn is an authenticated SSH connection, as seen from the -// server -type ServerConn struct { - Conn - - // If the succeeding authentication callback returned a - // non-nil Permissions pointer, it is stored here. - Permissions *Permissions -} - -// NewServerConn starts a new SSH server with c as the underlying -// transport. It starts with a handshake and, if the handshake is -// unsuccessful, it closes the connection and returns an error. The -// Request and NewChannel channels must be serviced, or the connection -// will hang. -// -// The returned error may be of type *ServerAuthError for -// authentication errors. -func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - if fullConf.MaxAuthTries == 0 { - fullConf.MaxAuthTries = 6 - } - - s := &connection{ - sshConn: sshConn{conn: c}, - } - perms, err := s.serverHandshake(&fullConf) - if err != nil { - c.Close() - return nil, nil, nil, err - } - return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil -} - -// signAndMarshal signs the data with the appropriate algorithm, -// and serializes the result in SSH wire format. -func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) { - sig, err := k.Sign(rand, data) - if err != nil { - return nil, err - } - - return Marshal(sig), nil -} - -// handshake performs key exchange and user authentication. -func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) { - if len(config.hostKeys) == 0 { - return nil, errors.New("ssh: server has no host keys") - } - - if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && config.KeyboardInteractiveCallback == nil { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if config.ServerVersion != "" { - s.serverVersion = []byte(config.ServerVersion) - } else { - s.serverVersion = []byte(packageVersion) - } - var err error - s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion) - if err != nil { - return nil, err - } - - tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */) - s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config) - - if err := s.transport.waitSession(); err != nil { - return nil, err - } - - // We just did the key change, so the session ID is established. 
- s.sessionID = s.transport.getSessionID() - - var packet []byte - if packet, err = s.transport.readPacket(); err != nil { - return nil, err - } - - var serviceRequest serviceRequestMsg - if err = Unmarshal(packet, &serviceRequest); err != nil { - return nil, err - } - if serviceRequest.Service != serviceUserAuth { - return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating") - } - serviceAccept := serviceAcceptMsg{ - Service: serviceUserAuth, - } - if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil { - return nil, err - } - - perms, err := s.serverAuthenticate(config) - if err != nil { - return nil, err - } - s.mux = newMux(s.transport) - return perms, err -} - -func isAcceptableAlgo(algo string) bool { - switch algo { - case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519, - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: - return true - } - return false -} - -func checkSourceAddress(addr net.Addr, sourceAddrs string) error { - if addr == nil { - return errors.New("ssh: no address known for client, but source-address match required") - } - - tcpAddr, ok := addr.(*net.TCPAddr) - if !ok { - return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr) - } - - for _, sourceAddr := range strings.Split(sourceAddrs, ",") { - if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil { - if allowedIP.Equal(tcpAddr.IP) { - return nil - } - } else { - _, ipNet, err := net.ParseCIDR(sourceAddr) - if err != nil { - return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err) - } - - if ipNet.Contains(tcpAddr.IP) { - return nil - } - } - } - - return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr) -} - -// ServerAuthError represents server authentication errors and is -// sometimes returned by NewServerConn. It appends any authentication -// errors that may occur, and is returned if all of the authentication -// methods provided by the user failed to authenticate. -type ServerAuthError struct { - // Errors contains authentication errors returned by the authentication - // callback methods. The first entry is typically ErrNoAuth. - Errors []error -} - -func (l ServerAuthError) Error() string { - var errs []string - for _, err := range l.Errors { - errs = append(errs, err.Error()) - } - return "[" + strings.Join(errs, ", ") + "]" -} - -// ErrNoAuth is the error value returned if no -// authentication method has been passed yet. This happens as a normal -// part of the authentication loop, since the client first tries -// 'none' authentication to discover available methods. -// It is returned in ServerAuthError.Errors from NewServerConn. 
-var ErrNoAuth = errors.New("ssh: no auth passed yet") - -func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { - sessionID := s.transport.getSessionID() - var cache pubKeyCache - var perms *Permissions - - authFailures := 0 - var authErrs []error - var displayedBanner bool - -userAuthLoop: - for { - if authFailures >= config.MaxAuthTries && config.MaxAuthTries > 0 { - discMsg := &disconnectMsg{ - Reason: 2, - Message: "too many authentication failures", - } - - if err := s.transport.writePacket(Marshal(discMsg)); err != nil { - return nil, err - } - - return nil, discMsg - } - - var userAuthReq userAuthRequestMsg - if packet, err := s.transport.readPacket(); err != nil { - if err == io.EOF { - return nil, &ServerAuthError{Errors: authErrs} - } - return nil, err - } else if err = Unmarshal(packet, &userAuthReq); err != nil { - return nil, err - } - - if userAuthReq.Service != serviceSSH { - return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) - } - - s.user = userAuthReq.User - - if !displayedBanner && config.BannerCallback != nil { - displayedBanner = true - msg := config.BannerCallback(s) - if msg != "" { - bannerMsg := &userAuthBannerMsg{ - Message: msg, - } - if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { - return nil, err - } - } - } - - perms = nil - authErr := ErrNoAuth - - switch userAuthReq.Method { - case "none": - if config.NoClientAuth { - authErr = nil - } - - // allow initial attempt of 'none' without penalty - if authFailures == 0 { - authFailures-- - } - case "password": - if config.PasswordCallback == nil { - authErr = errors.New("ssh: password auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 || payload[0] != 0 { - return nil, parseError(msgUserAuthRequest) - } - payload = payload[1:] - password, payload, ok := parseString(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - perms, authErr = config.PasswordCallback(s, password) - case "keyboard-interactive": - if config.KeyboardInteractiveCallback == nil { - authErr = errors.New("ssh: keyboard-interactive auth not configubred") - break - } - - prompter := &sshClientKeyboardInteractive{s} - perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) - case "publickey": - if config.PublicKeyCallback == nil { - authErr = errors.New("ssh: publickey auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 { - return nil, parseError(msgUserAuthRequest) - } - isQuery := payload[0] == 0 - payload = payload[1:] - algoBytes, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - algo := string(algoBytes) - if !isAcceptableAlgo(algo) { - authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) - break - } - - pubKeyData, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - - pubKey, err := ParsePublicKey(pubKeyData) - if err != nil { - return nil, err - } - - candidate, ok := cache.get(s.user, pubKeyData) - if !ok { - candidate.user = s.user - candidate.pubKeyData = pubKeyData - candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) - if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { - candidate.result = checkSourceAddress( - s.RemoteAddr(), - 
candidate.perms.CriticalOptions[sourceAddressCriticalOption]) - } - cache.add(candidate) - } - - if isQuery { - // The client can query if the given public key - // would be okay. - - if len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - if candidate.result == nil { - okMsg := userAuthPubKeyOkMsg{ - Algo: algo, - PubKey: pubKeyData, - } - if err = s.transport.writePacket(Marshal(&okMsg)); err != nil { - return nil, err - } - continue userAuthLoop - } - authErr = candidate.result - } else { - sig, payload, ok := parseSignature(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - // Ensure the public key algo and signature algo - // are supported. Compare the private key - // algorithm name that corresponds to algo with - // sig.Format. This is usually the same, but - // for certs, the names differ. - if !isAcceptableAlgo(sig.Format) { - break - } - signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData) - - if err := pubKey.Verify(signedData, sig); err != nil { - return nil, err - } - - authErr = candidate.result - perms = candidate.perms - } - default: - authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method) - } - - authErrs = append(authErrs, authErr) - - if config.AuthLogCallback != nil { - config.AuthLogCallback(s, userAuthReq.Method, authErr) - } - - if authErr == nil { - break userAuthLoop - } - - authFailures++ - - var failureMsg userAuthFailureMsg - if config.PasswordCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "password") - } - if config.PublicKeyCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "publickey") - } - if config.KeyboardInteractiveCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") - } - - if len(failureMsg.Methods) == 0 { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { - return nil, err - } - } - - if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil { - return nil, err - } - return perms, nil -} - -// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by -// asking the client on the other side of a ServerConn. 
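A sketch of the server-side counterpart that the helper below serves: a KeyboardInteractiveCallback driving a single challenge round. The prompt text and expected code are illustrative only.

package sshexample

import (
	"errors"

	"golang.org/x/crypto/ssh"
)

// keyboardInteractiveConfig builds a ServerConfig that authenticates with one
// keyboard-interactive prompt; a real deployment would verify the answer
// against something stronger than a hard-coded code.
func keyboardInteractiveConfig() *ssh.ServerConfig {
	return &ssh.ServerConfig{
		KeyboardInteractiveCallback: func(conn ssh.ConnMetadata, challenge ssh.KeyboardInteractiveChallenge) (*ssh.Permissions, error) {
			answers, err := challenge(conn.User(), "example host",
				[]string{"One-time code: "}, []bool{false}) // echo disabled for the prompt
			if err != nil {
				return nil, err
			}
			if len(answers) == 1 && answers[0] == "123456" { // illustrative code
				return nil, nil
			}
			return nil, errors.New("keyboard-interactive rejected")
		},
	}
}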
-type sshClientKeyboardInteractive struct { - *connection -} - -func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) { - if len(questions) != len(echos) { - return nil, errors.New("ssh: echos and questions must have equal length") - } - - var prompts []byte - for i := range questions { - prompts = appendString(prompts, questions[i]) - prompts = appendBool(prompts, echos[i]) - } - - if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ - Instruction: instruction, - NumPrompts: uint32(len(questions)), - Prompts: prompts, - })); err != nil { - return nil, err - } - - packet, err := c.transport.readPacket() - if err != nil { - return nil, err - } - if packet[0] != msgUserAuthInfoResponse { - return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0]) - } - packet = packet[1:] - - n, packet, ok := parseUint32(packet) - if !ok || int(n) != len(questions) { - return nil, parseError(msgUserAuthInfoResponse) - } - - for i := uint32(0); i < n; i++ { - ans, rest, ok := parseString(packet) - if !ok { - return nil, parseError(msgUserAuthInfoResponse) - } - - answers = append(answers, string(ans)) - packet = rest - } - if len(packet) != 0 { - return nil, errors.New("ssh: junk at end of message") - } - - return answers, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go deleted file mode 100644 index d3321f6b..00000000 --- a/vendor/golang.org/x/crypto/ssh/session.go +++ /dev/null @@ -1,647 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -// Session implements an interactive session described in -// "RFC 4254, section 6". - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "sync" -) - -type Signal string - -// POSIX signals as listed in RFC 4254 Section 6.10. -const ( - SIGABRT Signal = "ABRT" - SIGALRM Signal = "ALRM" - SIGFPE Signal = "FPE" - SIGHUP Signal = "HUP" - SIGILL Signal = "ILL" - SIGINT Signal = "INT" - SIGKILL Signal = "KILL" - SIGPIPE Signal = "PIPE" - SIGQUIT Signal = "QUIT" - SIGSEGV Signal = "SEGV" - SIGTERM Signal = "TERM" - SIGUSR1 Signal = "USR1" - SIGUSR2 Signal = "USR2" -) - -var signals = map[Signal]int{ - SIGABRT: 6, - SIGALRM: 14, - SIGFPE: 8, - SIGHUP: 1, - SIGILL: 4, - SIGINT: 2, - SIGKILL: 9, - SIGPIPE: 13, - SIGQUIT: 3, - SIGSEGV: 11, - SIGTERM: 15, -} - -type TerminalModes map[uint8]uint32 - -// POSIX terminal mode flags as listed in RFC 4254 Section 8. -const ( - tty_OP_END = 0 - VINTR = 1 - VQUIT = 2 - VERASE = 3 - VKILL = 4 - VEOF = 5 - VEOL = 6 - VEOL2 = 7 - VSTART = 8 - VSTOP = 9 - VSUSP = 10 - VDSUSP = 11 - VREPRINT = 12 - VWERASE = 13 - VLNEXT = 14 - VFLUSH = 15 - VSWTCH = 16 - VSTATUS = 17 - VDISCARD = 18 - IGNPAR = 30 - PARMRK = 31 - INPCK = 32 - ISTRIP = 33 - INLCR = 34 - IGNCR = 35 - ICRNL = 36 - IUCLC = 37 - IXON = 38 - IXANY = 39 - IXOFF = 40 - IMAXBEL = 41 - ISIG = 50 - ICANON = 51 - XCASE = 52 - ECHO = 53 - ECHOE = 54 - ECHOK = 55 - ECHONL = 56 - NOFLSH = 57 - TOSTOP = 58 - IEXTEN = 59 - ECHOCTL = 60 - ECHOKE = 61 - PENDIN = 62 - OPOST = 70 - OLCUC = 71 - ONLCR = 72 - OCRNL = 73 - ONOCR = 74 - ONLRET = 75 - CS7 = 90 - CS8 = 91 - PARENB = 92 - PARODD = 93 - TTY_OP_ISPEED = 128 - TTY_OP_OSPEED = 129 -) - -// A Session represents a connection to a remote command or shell. 
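For context, the typical client-side use of the Session type defined below; host, credentials, and command are illustrative placeholders.

package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ClientConfig{
		User:            "demo",                                   // illustrative user
		Auth:            []ssh.AuthMethod{ssh.Password("secret")}, // illustrative credential
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),              // acceptable in a sketch, not in production
	}
	client, err := ssh.Dial("tcp", "example.com:22", config) // illustrative host
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Each Session carries a single remote command; Output runs it and
	// collects stdout while stderr is discarded.
	sess, err := client.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	defer sess.Close()

	out, err := sess.Output("uname -a")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out)
}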
-type Session struct { - // Stdin specifies the remote process's standard input. - // If Stdin is nil, the remote process reads from an empty - // bytes.Buffer. - Stdin io.Reader - - // Stdout and Stderr specify the remote process's standard - // output and error. - // - // If either is nil, Run connects the corresponding file - // descriptor to an instance of ioutil.Discard. There is a - // fixed amount of buffering that is shared for the two streams. - // If either blocks it may eventually cause the remote - // command to block. - Stdout io.Writer - Stderr io.Writer - - ch Channel // the channel backing this session - started bool // true once Start, Run or Shell is invoked. - copyFuncs []func() error - errors chan error // one send per copyFunc - - // true if pipe method is active - stdinpipe, stdoutpipe, stderrpipe bool - - // stdinPipeWriter is non-nil if StdinPipe has not been called - // and Stdin was specified by the user; it is the write end of - // a pipe connecting Session.Stdin to the stdin channel. - stdinPipeWriter io.WriteCloser - - exitStatus chan error -} - -// SendRequest sends an out-of-band channel request on the SSH channel -// underlying the session. -func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - return s.ch.SendRequest(name, wantReply, payload) -} - -func (s *Session) Close() error { - return s.ch.Close() -} - -// RFC 4254 Section 6.4. -type setenvRequest struct { - Name string - Value string -} - -// Setenv sets an environment variable that will be applied to any -// command executed by Shell or Run. -func (s *Session) Setenv(name, value string) error { - msg := setenvRequest{ - Name: name, - Value: value, - } - ok, err := s.ch.SendRequest("env", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: setenv failed") - } - return err -} - -// RFC 4254 Section 6.2. -type ptyRequestMsg struct { - Term string - Columns uint32 - Rows uint32 - Width uint32 - Height uint32 - Modelist string -} - -// RequestPty requests the association of a pty with the session on the remote host. -func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error { - var tm []byte - for k, v := range termmodes { - kv := struct { - Key byte - Val uint32 - }{k, v} - - tm = append(tm, Marshal(&kv)...) - } - tm = append(tm, tty_OP_END) - req := ptyRequestMsg{ - Term: term, - Columns: uint32(w), - Rows: uint32(h), - Width: uint32(w * 8), - Height: uint32(h * 8), - Modelist: string(tm), - } - ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req)) - if err == nil && !ok { - err = errors.New("ssh: pty-req failed") - } - return err -} - -// RFC 4254 Section 6.5. -type subsystemRequestMsg struct { - Subsystem string -} - -// RequestSubsystem requests the association of a subsystem with the session on the remote host. -// A subsystem is a predefined command that runs in the background when the ssh session is initiated -func (s *Session) RequestSubsystem(subsystem string) error { - msg := subsystemRequestMsg{ - Subsystem: subsystem, - } - ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: subsystem request failed") - } - return err -} - -// RFC 4254 Section 6.7. -type ptyWindowChangeMsg struct { - Columns uint32 - Rows uint32 - Width uint32 - Height uint32 -} - -// WindowChange informs the remote host about a terminal window dimension change to h rows and w columns. 
-func (s *Session) WindowChange(h, w int) error { - req := ptyWindowChangeMsg{ - Columns: uint32(w), - Rows: uint32(h), - Width: uint32(w * 8), - Height: uint32(h * 8), - } - _, err := s.ch.SendRequest("window-change", false, Marshal(&req)) - return err -} - -// RFC 4254 Section 6.9. -type signalMsg struct { - Signal string -} - -// Signal sends the given signal to the remote process. -// sig is one of the SIG* constants. -func (s *Session) Signal(sig Signal) error { - msg := signalMsg{ - Signal: string(sig), - } - - _, err := s.ch.SendRequest("signal", false, Marshal(&msg)) - return err -} - -// RFC 4254 Section 6.5. -type execMsg struct { - Command string -} - -// Start runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start or Shell. -func (s *Session) Start(cmd string) error { - if s.started { - return errors.New("ssh: session already started") - } - req := execMsg{ - Command: cmd, - } - - ok, err := s.ch.SendRequest("exec", true, Marshal(&req)) - if err == nil && !ok { - err = fmt.Errorf("ssh: command %v failed", cmd) - } - if err != nil { - return err - } - return s.start() -} - -// Run runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start, Shell, Output, -// or CombinedOutput. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. -func (s *Session) Run(cmd string) error { - err := s.Start(cmd) - if err != nil { - return err - } - return s.Wait() -} - -// Output runs cmd on the remote host and returns its standard output. -func (s *Session) Output(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - var b bytes.Buffer - s.Stdout = &b - err := s.Run(cmd) - return b.Bytes(), err -} - -type singleWriter struct { - b bytes.Buffer - mu sync.Mutex -} - -func (w *singleWriter) Write(p []byte) (int, error) { - w.mu.Lock() - defer w.mu.Unlock() - return w.b.Write(p) -} - -// CombinedOutput runs cmd on the remote host and returns its combined -// standard output and standard error. -func (s *Session) CombinedOutput(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - var b singleWriter - s.Stdout = &b - s.Stderr = &b - err := s.Run(cmd) - return b.b.Bytes(), err -} - -// Shell starts a login shell on the remote host. A Session only -// accepts one call to Run, Start, Shell, Output, or CombinedOutput. 
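A minimal sketch of the Shell/RequestPty path documented here, taking an already established *ssh.Client; the terminal name, dimensions, and modes are illustrative.

package sshexample

import (
	"os"

	"golang.org/x/crypto/ssh"
)

// runShell requests a pty and an interactive login shell on client.
func runShell(client *ssh.Client) error {
	sess, err := client.NewSession()
	if err != nil {
		return err
	}
	defer sess.Close()

	sess.Stdin = os.Stdin
	sess.Stdout = os.Stdout
	sess.Stderr = os.Stderr

	modes := ssh.TerminalModes{
		ssh.ECHO:          1,     // enable echoing
		ssh.TTY_OP_ISPEED: 14400, // input speed in baud
		ssh.TTY_OP_OSPEED: 14400, // output speed in baud
	}
	if err := sess.RequestPty("xterm", 40, 80, modes); err != nil { // 40 rows, 80 columns
		return err
	}
	if err := sess.Shell(); err != nil {
		return err
	}
	// On a local resize, sess.WindowChange(rows, cols) keeps the remote pty in sync.
	return sess.Wait()
}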
-func (s *Session) Shell() error { - if s.started { - return errors.New("ssh: session already started") - } - - ok, err := s.ch.SendRequest("shell", true, nil) - if err == nil && !ok { - return errors.New("ssh: could not start shell") - } - if err != nil { - return err - } - return s.start() -} - -func (s *Session) start() error { - s.started = true - - type F func(*Session) - for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} { - setupFd(s) - } - - s.errors = make(chan error, len(s.copyFuncs)) - for _, fn := range s.copyFuncs { - go func(fn func() error) { - s.errors <- fn() - }(fn) - } - return nil -} - -// Wait waits for the remote command to exit. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. -func (s *Session) Wait() error { - if !s.started { - return errors.New("ssh: session not started") - } - waitErr := <-s.exitStatus - - if s.stdinPipeWriter != nil { - s.stdinPipeWriter.Close() - } - var copyError error - for range s.copyFuncs { - if err := <-s.errors; err != nil && copyError == nil { - copyError = err - } - } - if waitErr != nil { - return waitErr - } - return copyError -} - -func (s *Session) wait(reqs <-chan *Request) error { - wm := Waitmsg{status: -1} - // Wait for msg channel to be closed before returning. - for msg := range reqs { - switch msg.Type { - case "exit-status": - wm.status = int(binary.BigEndian.Uint32(msg.Payload)) - case "exit-signal": - var sigval struct { - Signal string - CoreDumped bool - Error string - Lang string - } - if err := Unmarshal(msg.Payload, &sigval); err != nil { - return err - } - - // Must sanitize strings? - wm.signal = sigval.Signal - wm.msg = sigval.Error - wm.lang = sigval.Lang - default: - // This handles keepalives and matches - // OpenSSH's behaviour. - if msg.WantReply { - msg.Reply(false, nil) - } - } - } - if wm.status == 0 { - return nil - } - if wm.status == -1 { - // exit-status was never sent from server - if wm.signal == "" { - // signal was not sent either. RFC 4254 - // section 6.10 recommends against this - // behavior, but it is allowed, so we let - // clients handle it. - return &ExitMissingError{} - } - wm.status = 128 - if _, ok := signals[Signal(wm.signal)]; ok { - wm.status += signals[Signal(wm.signal)] - } - } - - return &ExitError{wm} -} - -// ExitMissingError is returned if a session is torn down cleanly, but -// the server sends no confirmation of the exit status. 
-type ExitMissingError struct{} - -func (e *ExitMissingError) Error() string { - return "wait: remote command exited without exit status or exit signal" -} - -func (s *Session) stdin() { - if s.stdinpipe { - return - } - var stdin io.Reader - if s.Stdin == nil { - stdin = new(bytes.Buffer) - } else { - r, w := io.Pipe() - go func() { - _, err := io.Copy(w, s.Stdin) - w.CloseWithError(err) - }() - stdin, s.stdinPipeWriter = r, w - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.ch, stdin) - if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF { - err = err1 - } - return err - }) -} - -func (s *Session) stdout() { - if s.stdoutpipe { - return - } - if s.Stdout == nil { - s.Stdout = ioutil.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stdout, s.ch) - return err - }) -} - -func (s *Session) stderr() { - if s.stderrpipe { - return - } - if s.Stderr == nil { - s.Stderr = ioutil.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stderr, s.ch.Stderr()) - return err - }) -} - -// sessionStdin reroutes Close to CloseWrite. -type sessionStdin struct { - io.Writer - ch Channel -} - -func (s *sessionStdin) Close() error { - return s.ch.CloseWrite() -} - -// StdinPipe returns a pipe that will be connected to the -// remote command's standard input when the command starts. -func (s *Session) StdinPipe() (io.WriteCloser, error) { - if s.Stdin != nil { - return nil, errors.New("ssh: Stdin already set") - } - if s.started { - return nil, errors.New("ssh: StdinPipe after process started") - } - s.stdinpipe = true - return &sessionStdin{s.ch, s.ch}, nil -} - -// StdoutPipe returns a pipe that will be connected to the -// remote command's standard output when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StdoutPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. -func (s *Session) StdoutPipe() (io.Reader, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.started { - return nil, errors.New("ssh: StdoutPipe after process started") - } - s.stdoutpipe = true - return s.ch, nil -} - -// StderrPipe returns a pipe that will be connected to the -// remote command's standard error when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StderrPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. -func (s *Session) StderrPipe() (io.Reader, error) { - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - if s.started { - return nil, errors.New("ssh: StderrPipe after process started") - } - s.stderrpipe = true - return s.ch.Stderr(), nil -} - -// newSession returns a new interactive session on the remote host. -func newSession(ch Channel, reqs <-chan *Request) (*Session, error) { - s := &Session{ - ch: ch, - } - s.exitStatus = make(chan error, 1) - go func() { - s.exitStatus <- s.wait(reqs) - }() - - return s, nil -} - -// An ExitError reports unsuccessful completion of a remote command. -type ExitError struct { - Waitmsg -} - -func (e *ExitError) Error() string { - return e.Waitmsg.String() -} - -// Waitmsg stores the information about an exited remote command -// as reported by Wait. 
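A sketch of how callers typically distinguish these error types after Session.Run or Session.Wait returns.

package sshexample

import (
	"log"

	"golang.org/x/crypto/ssh"
)

// reportExit classifies the error returned by Session.Run or Session.Wait.
func reportExit(err error) {
	switch e := err.(type) {
	case nil:
		log.Print("command exited with status 0")
	case *ssh.ExitError:
		// The embedded Waitmsg exposes the remote exit status and signal.
		log.Printf("command failed: status=%d signal=%q msg=%q",
			e.ExitStatus(), e.Signal(), e.Msg())
	case *ssh.ExitMissingError:
		log.Print("session closed without an exit status or signal")
	default:
		log.Printf("I/O or protocol error: %v", err)
	}
}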
-type Waitmsg struct { - status int - signal string - msg string - lang string -} - -// ExitStatus returns the exit status of the remote command. -func (w Waitmsg) ExitStatus() int { - return w.status -} - -// Signal returns the exit signal of the remote command if -// it was terminated violently. -func (w Waitmsg) Signal() string { - return w.signal -} - -// Msg returns the exit message given by the remote command -func (w Waitmsg) Msg() string { - return w.msg -} - -// Lang returns the language tag. See RFC 3066 -func (w Waitmsg) Lang() string { - return w.lang -} - -func (w Waitmsg) String() string { - str := fmt.Sprintf("Process exited with status %v", w.status) - if w.signal != "" { - str += fmt.Sprintf(" from signal %v", w.signal) - } - if w.msg != "" { - str += fmt.Sprintf(". Reason was: %v", w.msg) - } - return str -} diff --git a/vendor/golang.org/x/crypto/ssh/session_test.go b/vendor/golang.org/x/crypto/ssh/session_test.go deleted file mode 100644 index 7dce6dd6..00000000 --- a/vendor/golang.org/x/crypto/ssh/session_test.go +++ /dev/null @@ -1,774 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -// Session tests. - -import ( - "bytes" - crypto_rand "crypto/rand" - "errors" - "io" - "io/ioutil" - "math/rand" - "net" - "testing" - - "golang.org/x/crypto/ssh/terminal" -) - -type serverType func(Channel, <-chan *Request, *testing.T) - -// dial constructs a new test server and returns a *ClientConn. -func dial(handler serverType, t *testing.T) *Client { - c1, c2, err := netPipe() - if err != nil { - t.Fatalf("netPipe: %v", err) - } - - go func() { - defer c1.Close() - conf := ServerConfig{ - NoClientAuth: true, - } - conf.AddHostKey(testSigners["rsa"]) - - _, chans, reqs, err := NewServerConn(c1, &conf) - if err != nil { - t.Fatalf("Unable to handshake: %v", err) - } - go DiscardRequests(reqs) - - for newCh := range chans { - if newCh.ChannelType() != "session" { - newCh.Reject(UnknownChannelType, "unknown channel type") - continue - } - - ch, inReqs, err := newCh.Accept() - if err != nil { - t.Errorf("Accept: %v", err) - continue - } - go func() { - handler(ch, inReqs, t) - }() - } - }() - - config := &ClientConfig{ - User: "testuser", - HostKeyCallback: InsecureIgnoreHostKey(), - } - - conn, chans, reqs, err := NewClientConn(c2, "", config) - if err != nil { - t.Fatalf("unable to dial remote side: %v", err) - } - - return NewClient(conn, chans, reqs) -} - -// Test a simple string is returned to session.Stdout. -func TestSessionShell(t *testing.T) { - conn := dial(shellHandler, t) - defer conn.Close() - session, err := conn.NewSession() - if err != nil { - t.Fatalf("Unable to request new session: %v", err) - } - defer session.Close() - stdout := new(bytes.Buffer) - session.Stdout = stdout - if err := session.Shell(); err != nil { - t.Fatalf("Unable to execute command: %s", err) - } - if err := session.Wait(); err != nil { - t.Fatalf("Remote command did not exit cleanly: %v", err) - } - actual := stdout.String() - if actual != "golang" { - t.Fatalf("Remote shell did not return expected string: expected=golang, actual=%s", actual) - } -} - -// TODO(dfc) add support for Std{in,err}Pipe when the Server supports it. - -// Test a simple string is returned via StdoutPipe. 
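A sketch of the client-side StdoutPipe pattern the test below exercises, assuming an established *ssh.Client; the command is illustrative.

package sshexample

import (
	"io"
	"os"

	"golang.org/x/crypto/ssh"
)

// streamCommand starts cmd on client and streams its stdout locally while it runs.
func streamCommand(client *ssh.Client, cmd string) error {
	sess, err := client.NewSession()
	if err != nil {
		return err
	}
	defer sess.Close()

	stdout, err := sess.StdoutPipe() // must be requested before Start
	if err != nil {
		return err
	}
	if err := sess.Start(cmd); err != nil {
		return err
	}

	done := make(chan error, 1)
	go func() {
		_, copyErr := io.Copy(os.Stdout, stdout)
		done <- copyErr
	}()

	if err := sess.Wait(); err != nil {
		return err
	}
	return <-done
}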
-func TestSessionStdoutPipe(t *testing.T) { - conn := dial(shellHandler, t) - defer conn.Close() - session, err := conn.NewSession() - if err != nil { - t.Fatalf("Unable to request new session: %v", err) - } - defer session.Close() - stdout, err := session.StdoutPipe() - if err != nil { - t.Fatalf("Unable to request StdoutPipe(): %v", err) - } - var buf bytes.Buffer - if err := session.Shell(); err != nil { - t.Fatalf("Unable to execute command: %v", err) - } - done := make(chan bool, 1) - go func() { - if _, err := io.Copy(&buf, stdout); err != nil { - t.Errorf("Copy of stdout failed: %v", err) - } - done <- true - }() - if err := session.Wait(); err != nil { - t.Fatalf("Remote command did not exit cleanly: %v", err) - } - <-done - actual := buf.String() - if actual != "golang" { - t.Fatalf("Remote shell did not return expected string: expected=golang, actual=%s", actual) - } -} - -// Test that a simple string is returned via the Output helper, -// and that stderr is discarded. -func TestSessionOutput(t *testing.T) { - conn := dial(fixedOutputHandler, t) - defer conn.Close() - session, err := conn.NewSession() - if err != nil { - t.Fatalf("Unable to request new session: %v", err) - } - defer session.Close() - - buf, err := session.Output("") // cmd is ignored by fixedOutputHandler - if err != nil { - t.Error("Remote command did not exit cleanly:", err) - } - w := "this-is-stdout." - g := string(buf) - if g != w { - t.Error("Remote command did not return expected string:") - t.Logf("want %q", w) - t.Logf("got %q", g) - } -} - -// Test that both stdout and stderr are returned -// via the CombinedOutput helper. -func TestSessionCombinedOutput(t *testing.T) { - conn := dial(fixedOutputHandler, t) - defer conn.Close() - session, err := conn.NewSession() - if err != nil { - t.Fatalf("Unable to request new session: %v", err) - } - defer session.Close() - - buf, err := session.CombinedOutput("") // cmd is ignored by fixedOutputHandler - if err != nil { - t.Error("Remote command did not exit cleanly:", err) - } - const stdout = "this-is-stdout." - const stderr = "this-is-stderr." - g := string(buf) - if g != stdout+stderr && g != stderr+stdout { - t.Error("Remote command did not return expected string:") - t.Logf("want %q, or %q", stdout+stderr, stderr+stdout) - t.Logf("got %q", g) - } -} - -// Test non-0 exit status is returned correctly. -func TestExitStatusNonZero(t *testing.T) { - conn := dial(exitStatusNonZeroHandler, t) - defer conn.Close() - session, err := conn.NewSession() - if err != nil { - t.Fatalf("Unable to request new session: %v", err) - } - defer session.Close() - if err := session.Shell(); err != nil { - t.Fatalf("Unable to execute command: %v", err) - } - err = session.Wait() - if err == nil { - t.Fatalf("expected command to fail but it didn't") - } - e, ok := err.(*ExitError) - if !ok { - t.Fatalf("expected *ExitError but got %T", err) - } - if e.ExitStatus() != 15 { - t.Fatalf("expected command to exit with 15 but got %v", e.ExitStatus()) - } -} - -// Test 0 exit status is returned correctly. -func TestExitStatusZero(t *testing.T) { - conn := dial(exitStatusZeroHandler, t) - defer conn.Close() - session, err := conn.NewSession() - if err != nil { - t.Fatalf("Unable to request new session: %v", err) - } - defer session.Close() - - if err := session.Shell(); err != nil { - t.Fatalf("Unable to execute command: %v", err) - } - err = session.Wait() - if err != nil { - t.Fatalf("expected nil but got %v", err) - } -} - -// Test exit signal and status are both returned correctly. 
-func TestExitSignalAndStatus(t *testing.T) { - conn := dial(exitSignalAndStatusHandler, t) - defer conn.Close() - session, err := conn.NewSession() - if err != nil { - t.Fatalf("Unable to request new session: %v", err) - } - defer session.Close() - if err := session.Shell(); err != nil { - t.Fatalf("Unable to execute command: %v", err) - } - err = session.Wait() - if err == nil { - t.Fatalf("expected command to fail but it didn't") - } - e, ok := err.(*ExitError) - if !ok { - t.Fatalf("expected *ExitError but got %T", err) - } - if e.Signal() != "TERM" || e.ExitStatus() != 15 { - t.Fatalf("expected command to exit with signal TERM and status 15 but got signal %s and status %v", e.Signal(), e.ExitStatus()) - } -} - -// Test exit signal and status are both returned correctly. -func TestKnownExitSignalOnly(t *testing.T) { - conn := dial(exitSignalHandler, t) - defer conn.Close() - session, err := conn.NewSession() - if err != nil { - t.Fatalf("Unable to request new session: %v", err) - } - defer session.Close() - if err := session.Shell(); err != nil { - t.Fatalf("Unable to execute command: %v", err) - } - err = session.Wait() - if err == nil { - t.Fatalf("expected command to fail but it didn't") - } - e, ok := err.(*ExitError) - if !ok { - t.Fatalf("expected *ExitError but got %T", err) - } - if e.Signal() != "TERM" || e.ExitStatus() != 143 { - t.Fatalf("expected command to exit with signal TERM and status 143 but got signal %s and status %v", e.Signal(), e.ExitStatus()) - } -} - -// Test exit signal and status are both returned correctly. -func TestUnknownExitSignal(t *testing.T) { - conn := dial(exitSignalUnknownHandler, t) - defer conn.Close() - session, err := conn.NewSession() - if err != nil { - t.Fatalf("Unable to request new session: %v", err) - } - defer session.Close() - if err := session.Shell(); err != nil { - t.Fatalf("Unable to execute command: %v", err) - } - err = session.Wait() - if err == nil { - t.Fatalf("expected command to fail but it didn't") - } - e, ok := err.(*ExitError) - if !ok { - t.Fatalf("expected *ExitError but got %T", err) - } - if e.Signal() != "SYS" || e.ExitStatus() != 128 { - t.Fatalf("expected command to exit with signal SYS and status 128 but got signal %s and status %v", e.Signal(), e.ExitStatus()) - } -} - -func TestExitWithoutStatusOrSignal(t *testing.T) { - conn := dial(exitWithoutSignalOrStatus, t) - defer conn.Close() - session, err := conn.NewSession() - if err != nil { - t.Fatalf("Unable to request new session: %v", err) - } - defer session.Close() - if err := session.Shell(); err != nil { - t.Fatalf("Unable to execute command: %v", err) - } - err = session.Wait() - if err == nil { - t.Fatalf("expected command to fail but it didn't") - } - if _, ok := err.(*ExitMissingError); !ok { - t.Fatalf("got %T want *ExitMissingError", err) - } -} - -// windowTestBytes is the number of bytes that we'll send to the SSH server. -const windowTestBytes = 16000 * 200 - -// TestServerWindow writes random data to the server. The server is expected to echo -// the same data back, which is compared against the original. 
-func TestServerWindow(t *testing.T) { - origBuf := bytes.NewBuffer(make([]byte, 0, windowTestBytes)) - io.CopyN(origBuf, crypto_rand.Reader, windowTestBytes) - origBytes := origBuf.Bytes() - - conn := dial(echoHandler, t) - defer conn.Close() - session, err := conn.NewSession() - if err != nil { - t.Fatal(err) - } - defer session.Close() - result := make(chan []byte) - - go func() { - defer close(result) - echoedBuf := bytes.NewBuffer(make([]byte, 0, windowTestBytes)) - serverStdout, err := session.StdoutPipe() - if err != nil { - t.Errorf("StdoutPipe failed: %v", err) - return - } - n, err := copyNRandomly("stdout", echoedBuf, serverStdout, windowTestBytes) - if err != nil && err != io.EOF { - t.Errorf("Read only %d bytes from server, expected %d: %v", n, windowTestBytes, err) - } - result <- echoedBuf.Bytes() - }() - - serverStdin, err := session.StdinPipe() - if err != nil { - t.Fatalf("StdinPipe failed: %v", err) - } - written, err := copyNRandomly("stdin", serverStdin, origBuf, windowTestBytes) - if err != nil { - t.Fatalf("failed to copy origBuf to serverStdin: %v", err) - } - if written != windowTestBytes { - t.Fatalf("Wrote only %d of %d bytes to server", written, windowTestBytes) - } - - echoedBytes := <-result - - if !bytes.Equal(origBytes, echoedBytes) { - t.Fatalf("Echoed buffer differed from original, orig %d, echoed %d", len(origBytes), len(echoedBytes)) - } -} - -// Verify the client can handle a keepalive packet from the server. -func TestClientHandlesKeepalives(t *testing.T) { - conn := dial(channelKeepaliveSender, t) - defer conn.Close() - session, err := conn.NewSession() - if err != nil { - t.Fatal(err) - } - defer session.Close() - if err := session.Shell(); err != nil { - t.Fatalf("Unable to execute command: %v", err) - } - err = session.Wait() - if err != nil { - t.Fatalf("expected nil but got: %v", err) - } -} - -type exitStatusMsg struct { - Status uint32 -} - -type exitSignalMsg struct { - Signal string - CoreDumped bool - Errmsg string - Lang string -} - -func handleTerminalRequests(in <-chan *Request) { - for req := range in { - ok := false - switch req.Type { - case "shell": - ok = true - if len(req.Payload) > 0 { - // We don't accept any commands, only the default shell. 
- ok = false - } - case "env": - ok = true - } - req.Reply(ok, nil) - } -} - -func newServerShell(ch Channel, in <-chan *Request, prompt string) *terminal.Terminal { - term := terminal.NewTerminal(ch, prompt) - go handleTerminalRequests(in) - return term -} - -func exitStatusZeroHandler(ch Channel, in <-chan *Request, t *testing.T) { - defer ch.Close() - // this string is returned to stdout - shell := newServerShell(ch, in, "> ") - readLine(shell, t) - sendStatus(0, ch, t) -} - -func exitStatusNonZeroHandler(ch Channel, in <-chan *Request, t *testing.T) { - defer ch.Close() - shell := newServerShell(ch, in, "> ") - readLine(shell, t) - sendStatus(15, ch, t) -} - -func exitSignalAndStatusHandler(ch Channel, in <-chan *Request, t *testing.T) { - defer ch.Close() - shell := newServerShell(ch, in, "> ") - readLine(shell, t) - sendStatus(15, ch, t) - sendSignal("TERM", ch, t) -} - -func exitSignalHandler(ch Channel, in <-chan *Request, t *testing.T) { - defer ch.Close() - shell := newServerShell(ch, in, "> ") - readLine(shell, t) - sendSignal("TERM", ch, t) -} - -func exitSignalUnknownHandler(ch Channel, in <-chan *Request, t *testing.T) { - defer ch.Close() - shell := newServerShell(ch, in, "> ") - readLine(shell, t) - sendSignal("SYS", ch, t) -} - -func exitWithoutSignalOrStatus(ch Channel, in <-chan *Request, t *testing.T) { - defer ch.Close() - shell := newServerShell(ch, in, "> ") - readLine(shell, t) -} - -func shellHandler(ch Channel, in <-chan *Request, t *testing.T) { - defer ch.Close() - // this string is returned to stdout - shell := newServerShell(ch, in, "golang") - readLine(shell, t) - sendStatus(0, ch, t) -} - -// Ignores the command, writes fixed strings to stderr and stdout. -// Strings are "this-is-stdout." and "this-is-stderr.". -func fixedOutputHandler(ch Channel, in <-chan *Request, t *testing.T) { - defer ch.Close() - _, err := ch.Read(nil) - - req, ok := <-in - if !ok { - t.Fatalf("error: expected channel request, got: %#v", err) - return - } - - // ignore request, always send some text - req.Reply(true, nil) - - _, err = io.WriteString(ch, "this-is-stdout.") - if err != nil { - t.Fatalf("error writing on server: %v", err) - } - _, err = io.WriteString(ch.Stderr(), "this-is-stderr.") - if err != nil { - t.Fatalf("error writing on server: %v", err) - } - sendStatus(0, ch, t) -} - -func readLine(shell *terminal.Terminal, t *testing.T) { - if _, err := shell.ReadLine(); err != nil && err != io.EOF { - t.Errorf("unable to read line: %v", err) - } -} - -func sendStatus(status uint32, ch Channel, t *testing.T) { - msg := exitStatusMsg{ - Status: status, - } - if _, err := ch.SendRequest("exit-status", false, Marshal(&msg)); err != nil { - t.Errorf("unable to send status: %v", err) - } -} - -func sendSignal(signal string, ch Channel, t *testing.T) { - sig := exitSignalMsg{ - Signal: signal, - CoreDumped: false, - Errmsg: "Process terminated", - Lang: "en-GB-oed", - } - if _, err := ch.SendRequest("exit-signal", false, Marshal(&sig)); err != nil { - t.Errorf("unable to send signal: %v", err) - } -} - -func discardHandler(ch Channel, t *testing.T) { - defer ch.Close() - io.Copy(ioutil.Discard, ch) -} - -func echoHandler(ch Channel, in <-chan *Request, t *testing.T) { - defer ch.Close() - if n, err := copyNRandomly("echohandler", ch, ch, windowTestBytes); err != nil { - t.Errorf("short write, wrote %d, expected %d: %v ", n, windowTestBytes, err) - } -} - -// copyNRandomly copies n bytes from src to dst. It uses a variable, and random, -// buffer size to exercise more code paths. 
-func copyNRandomly(title string, dst io.Writer, src io.Reader, n int) (int, error) { - var ( - buf = make([]byte, 32*1024) - written int - remaining = n - ) - for remaining > 0 { - l := rand.Intn(1 << 15) - if remaining < l { - l = remaining - } - nr, er := src.Read(buf[:l]) - nw, ew := dst.Write(buf[:nr]) - remaining -= nw - written += nw - if ew != nil { - return written, ew - } - if nr != nw { - return written, io.ErrShortWrite - } - if er != nil && er != io.EOF { - return written, er - } - } - return written, nil -} - -func channelKeepaliveSender(ch Channel, in <-chan *Request, t *testing.T) { - defer ch.Close() - shell := newServerShell(ch, in, "> ") - readLine(shell, t) - if _, err := ch.SendRequest("keepalive@openssh.com", true, nil); err != nil { - t.Errorf("unable to send channel keepalive request: %v", err) - } - sendStatus(0, ch, t) -} - -func TestClientWriteEOF(t *testing.T) { - conn := dial(simpleEchoHandler, t) - defer conn.Close() - - session, err := conn.NewSession() - if err != nil { - t.Fatal(err) - } - defer session.Close() - stdin, err := session.StdinPipe() - if err != nil { - t.Fatalf("StdinPipe failed: %v", err) - } - stdout, err := session.StdoutPipe() - if err != nil { - t.Fatalf("StdoutPipe failed: %v", err) - } - - data := []byte(`0000`) - _, err = stdin.Write(data) - if err != nil { - t.Fatalf("Write failed: %v", err) - } - stdin.Close() - - res, err := ioutil.ReadAll(stdout) - if err != nil { - t.Fatalf("Read failed: %v", err) - } - - if !bytes.Equal(data, res) { - t.Fatalf("Read differed from write, wrote: %v, read: %v", data, res) - } -} - -func simpleEchoHandler(ch Channel, in <-chan *Request, t *testing.T) { - defer ch.Close() - data, err := ioutil.ReadAll(ch) - if err != nil { - t.Errorf("handler read error: %v", err) - } - _, err = ch.Write(data) - if err != nil { - t.Errorf("handler write error: %v", err) - } -} - -func TestSessionID(t *testing.T) { - c1, c2, err := netPipe() - if err != nil { - t.Fatalf("netPipe: %v", err) - } - defer c1.Close() - defer c2.Close() - - serverID := make(chan []byte, 1) - clientID := make(chan []byte, 1) - - serverConf := &ServerConfig{ - NoClientAuth: true, - } - serverConf.AddHostKey(testSigners["ecdsa"]) - clientConf := &ClientConfig{ - HostKeyCallback: InsecureIgnoreHostKey(), - User: "user", - } - - go func() { - conn, chans, reqs, err := NewServerConn(c1, serverConf) - if err != nil { - t.Fatalf("server handshake: %v", err) - } - serverID <- conn.SessionID() - go DiscardRequests(reqs) - for ch := range chans { - ch.Reject(Prohibited, "") - } - }() - - go func() { - conn, chans, reqs, err := NewClientConn(c2, "", clientConf) - if err != nil { - t.Fatalf("client handshake: %v", err) - } - clientID <- conn.SessionID() - go DiscardRequests(reqs) - for ch := range chans { - ch.Reject(Prohibited, "") - } - }() - - s := <-serverID - c := <-clientID - if bytes.Compare(s, c) != 0 { - t.Errorf("server session ID (%x) != client session ID (%x)", s, c) - } else if len(s) == 0 { - t.Errorf("client and server SessionID were empty.") - } -} - -type noReadConn struct { - readSeen bool - net.Conn -} - -func (c *noReadConn) Close() error { - return nil -} - -func (c *noReadConn) Read(b []byte) (int, error) { - c.readSeen = true - return 0, errors.New("noReadConn error") -} - -func TestInvalidServerConfiguration(t *testing.T) { - c1, c2, err := netPipe() - if err != nil { - t.Fatalf("netPipe: %v", err) - } - defer c1.Close() - defer c2.Close() - - serveConn := noReadConn{Conn: c1} - serverConf := &ServerConfig{} - - 
NewServerConn(&serveConn, serverConf) - if serveConn.readSeen { - t.Fatalf("NewServerConn attempted to Read() from Conn while configuration is missing host key") - } - - serverConf.AddHostKey(testSigners["ecdsa"]) - - NewServerConn(&serveConn, serverConf) - if serveConn.readSeen { - t.Fatalf("NewServerConn attempted to Read() from Conn while configuration is missing authentication method") - } -} - -func TestHostKeyAlgorithms(t *testing.T) { - serverConf := &ServerConfig{ - NoClientAuth: true, - } - serverConf.AddHostKey(testSigners["rsa"]) - serverConf.AddHostKey(testSigners["ecdsa"]) - - connect := func(clientConf *ClientConfig, want string) { - var alg string - clientConf.HostKeyCallback = func(h string, a net.Addr, key PublicKey) error { - alg = key.Type() - return nil - } - c1, c2, err := netPipe() - if err != nil { - t.Fatalf("netPipe: %v", err) - } - defer c1.Close() - defer c2.Close() - - go NewServerConn(c1, serverConf) - _, _, _, err = NewClientConn(c2, "", clientConf) - if err != nil { - t.Fatalf("NewClientConn: %v", err) - } - if alg != want { - t.Errorf("selected key algorithm %s, want %s", alg, want) - } - } - - // By default, we get the preferred algorithm, which is ECDSA 256. - - clientConf := &ClientConfig{ - HostKeyCallback: InsecureIgnoreHostKey(), - } - connect(clientConf, KeyAlgoECDSA256) - - // Client asks for RSA explicitly. - clientConf.HostKeyAlgorithms = []string{KeyAlgoRSA} - connect(clientConf, KeyAlgoRSA) - - c1, c2, err := netPipe() - if err != nil { - t.Fatalf("netPipe: %v", err) - } - defer c1.Close() - defer c2.Close() - - go NewServerConn(c1, serverConf) - clientConf.HostKeyAlgorithms = []string{"nonexistent-hostkey-algo"} - _, _, _, err = NewClientConn(c2, "", clientConf) - if err == nil { - t.Fatal("succeeded connecting with unknown hostkey algorithm") - } -} diff --git a/vendor/golang.org/x/crypto/ssh/streamlocal.go b/vendor/golang.org/x/crypto/ssh/streamlocal.go deleted file mode 100644 index b171b330..00000000 --- a/vendor/golang.org/x/crypto/ssh/streamlocal.go +++ /dev/null @@ -1,116 +0,0 @@ -package ssh - -import ( - "errors" - "io" - "net" -) - -// streamLocalChannelOpenDirectMsg is a struct used for SSH_MSG_CHANNEL_OPEN message -// with "direct-streamlocal@openssh.com" string. -// -// See openssh-portable/PROTOCOL, section 2.4. connection: Unix domain socket forwarding -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL#L235 -type streamLocalChannelOpenDirectMsg struct { - socketPath string - reserved0 string - reserved1 uint32 -} - -// forwardedStreamLocalPayload is a struct used for SSH_MSG_CHANNEL_OPEN message -// with "forwarded-streamlocal@openssh.com" string. -type forwardedStreamLocalPayload struct { - SocketPath string - Reserved0 string -} - -// streamLocalChannelForwardMsg is a struct used for SSH2_MSG_GLOBAL_REQUEST message -// with "streamlocal-forward@openssh.com"/"cancel-streamlocal-forward@openssh.com" string. -type streamLocalChannelForwardMsg struct { - socketPath string -} - -// ListenUnix is similar to ListenTCP but uses a Unix domain socket. 
-func (c *Client) ListenUnix(socketPath string) (net.Listener, error) { - c.handleForwardsOnce.Do(c.handleForwards) - m := streamLocalChannelForwardMsg{ - socketPath, - } - // send message - ok, _, err := c.SendRequest("streamlocal-forward@openssh.com", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer") - } - ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"}) - - return &unixListener{socketPath, c, ch}, nil -} - -func (c *Client) dialStreamLocal(socketPath string) (Channel, error) { - msg := streamLocalChannelOpenDirectMsg{ - socketPath: socketPath, - } - ch, in, err := c.OpenChannel("direct-streamlocal@openssh.com", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type unixListener struct { - socketPath string - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. -func (l *unixListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &chanConn{ - Channel: ch, - laddr: &net.UnixAddr{ - Name: l.socketPath, - Net: "unix", - }, - raddr: &net.UnixAddr{ - Name: "@", - Net: "unix", - }, - }, nil -} - -// Close closes the listener. -func (l *unixListener) Close() error { - // this also closes the listener. - l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"}) - m := streamLocalChannelForwardMsg{ - l.socketPath, - } - ok, _, err := l.conn.SendRequest("cancel-streamlocal-forward@openssh.com", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-streamlocal-forward@openssh.com failed") - } - return err -} - -// Addr returns the listener's network address. -func (l *unixListener) Addr() net.Addr { - return &net.UnixAddr{ - Name: l.socketPath, - Net: "unix", - } -} diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go deleted file mode 100644 index 80d35f5e..00000000 --- a/vendor/golang.org/x/crypto/ssh/tcpip.go +++ /dev/null @@ -1,474 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "errors" - "fmt" - "io" - "math/rand" - "net" - "strconv" - "strings" - "sync" - "time" -) - -// Listen requests the remote peer open a listening socket on -// addr. Incoming connections will be available by calling Accept on -// the returned net.Listener. The listener must be serviced, or the -// SSH connection may hang. -// N must be "tcp", "tcp4", "tcp6", or "unix". -func (c *Client) Listen(n, addr string) (net.Listener, error) { - switch n { - case "tcp", "tcp4", "tcp6": - laddr, err := net.ResolveTCPAddr(n, addr) - if err != nil { - return nil, err - } - return c.ListenTCP(laddr) - case "unix": - return c.ListenUnix(addr) - default: - return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) - } -} - -// Automatic port allocation is broken with OpenSSH before 6.0. See -// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In -// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0, -// rather than the actual port number. This means you can never open -// two different listeners with auto allocated ports. We work around -// this by trying explicit ports until we succeed. 
- -const openSSHPrefix = "OpenSSH_" - -var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano())) - -// isBrokenOpenSSHVersion returns true if the given version string -// specifies a version of OpenSSH that is known to have a bug in port -// forwarding. -func isBrokenOpenSSHVersion(versionStr string) bool { - i := strings.Index(versionStr, openSSHPrefix) - if i < 0 { - return false - } - i += len(openSSHPrefix) - j := i - for ; j < len(versionStr); j++ { - if versionStr[j] < '0' || versionStr[j] > '9' { - break - } - } - version, _ := strconv.Atoi(versionStr[i:j]) - return version < 6 -} - -// autoPortListenWorkaround simulates automatic port allocation by -// trying random ports repeatedly. -func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) { - var sshListener net.Listener - var err error - const tries = 10 - for i := 0; i < tries; i++ { - addr := *laddr - addr.Port = 1024 + portRandomizer.Intn(60000) - sshListener, err = c.ListenTCP(&addr) - if err == nil { - laddr.Port = addr.Port - return sshListener, err - } - } - return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err) -} - -// RFC 4254 7.1 -type channelForwardMsg struct { - addr string - rport uint32 -} - -// handleForwards starts goroutines handling forwarded connections. -// It's called on first use by (*Client).ListenTCP to not launch -// goroutines until needed. -func (c *Client) handleForwards() { - go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-tcpip")) - go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-streamlocal@openssh.com")) -} - -// ListenTCP requests the remote peer open a listening socket -// on laddr. Incoming connections will be available by calling -// Accept on the returned net.Listener. -func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { - c.handleForwardsOnce.Do(c.handleForwards) - if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { - return c.autoPortListenWorkaround(laddr) - } - - m := channelForwardMsg{ - laddr.IP.String(), - uint32(laddr.Port), - } - // send message - ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: tcpip-forward request denied by peer") - } - - // If the original port was 0, then the remote side will - // supply a real port number in the response. - if laddr.Port == 0 { - var p struct { - Port uint32 - } - if err := Unmarshal(resp, &p); err != nil { - return nil, err - } - laddr.Port = int(p.Port) - } - - // Register this forward, using the port number we obtained. - ch := c.forwards.add(laddr) - - return &tcpListener{laddr, c, ch}, nil -} - -// forwardList stores a mapping between remote -// forward requests and the tcpListeners. -type forwardList struct { - sync.Mutex - entries []forwardEntry -} - -// forwardEntry represents an established mapping of a laddr on a -// remote ssh server to a channel connected to a tcpListener. -type forwardEntry struct { - laddr net.Addr - c chan forward -} - -// forward represents an incoming forwarded tcpip connection. The -// arguments to add/remove/lookup should be address as specified in -// the original forward-request. 
-type forward struct { - newCh NewChannel // the ssh client channel underlying this forward - raddr net.Addr // the raddr of the incoming connection -} - -func (l *forwardList) add(addr net.Addr) chan forward { - l.Lock() - defer l.Unlock() - f := forwardEntry{ - laddr: addr, - c: make(chan forward, 1), - } - l.entries = append(l.entries, f) - return f.c -} - -// See RFC 4254, section 7.2 -type forwardedTCPPayload struct { - Addr string - Port uint32 - OriginAddr string - OriginPort uint32 -} - -// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr. -func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { - if port == 0 || port > 65535 { - return nil, fmt.Errorf("ssh: port number out of range: %d", port) - } - ip := net.ParseIP(string(addr)) - if ip == nil { - return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) - } - return &net.TCPAddr{IP: ip, Port: int(port)}, nil -} - -func (l *forwardList) handleChannels(in <-chan NewChannel) { - for ch := range in { - var ( - laddr net.Addr - raddr net.Addr - err error - ) - switch channelType := ch.ChannelType(); channelType { - case "forwarded-tcpip": - var payload forwardedTCPPayload - if err = Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error()) - continue - } - - // RFC 4254 section 7.2 specifies that incoming - // addresses should list the address, in string - // format. It is implied that this should be an IP - // address, as it would be impossible to connect to it - // otherwise. - laddr, err = parseTCPAddr(payload.Addr, payload.Port) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - - case "forwarded-streamlocal@openssh.com": - var payload forwardedStreamLocalPayload - if err = Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error()) - continue - } - laddr = &net.UnixAddr{ - Name: payload.SocketPath, - Net: "unix", - } - raddr = &net.UnixAddr{ - Name: "@", - Net: "unix", - } - default: - panic(fmt.Errorf("ssh: unknown channel type %s", channelType)) - } - if ok := l.forward(laddr, raddr, ch); !ok { - // Section 7.2, implementations MUST reject spurious incoming - // connections. - ch.Reject(Prohibited, "no forward for address") - continue - } - - } -} - -// remove removes the forward entry, and the channel feeding its -// listener. -func (l *forwardList) remove(addr net.Addr) { - l.Lock() - defer l.Unlock() - for i, f := range l.entries { - if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() { - l.entries = append(l.entries[:i], l.entries[i+1:]...) - close(f.c) - return - } - } -} - -// closeAll closes and clears all forwards. -func (l *forwardList) closeAll() { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - close(f.c) - } - l.entries = nil -} - -func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() { - f.c <- forward{newCh: ch, raddr: raddr} - return true - } - } - return false -} - -type tcpListener struct { - laddr *net.TCPAddr - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. 
-func (l *tcpListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &chanConn{ - Channel: ch, - laddr: l.laddr, - raddr: s.raddr, - }, nil -} - -// Close closes the listener. -func (l *tcpListener) Close() error { - m := channelForwardMsg{ - l.laddr.IP.String(), - uint32(l.laddr.Port), - } - - // this also closes the listener. - l.conn.forwards.remove(l.laddr) - ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-tcpip-forward failed") - } - return err -} - -// Addr returns the listener's network address. -func (l *tcpListener) Addr() net.Addr { - return l.laddr -} - -// Dial initiates a connection to the addr from the remote host. -// The resulting connection has a zero LocalAddr() and RemoteAddr(). -func (c *Client) Dial(n, addr string) (net.Conn, error) { - var ch Channel - switch n { - case "tcp", "tcp4", "tcp6": - // Parse the address into host and numeric port. - host, portString, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - port, err := strconv.ParseUint(portString, 10, 16) - if err != nil { - return nil, err - } - ch, err = c.dial(net.IPv4zero.String(), 0, host, int(port)) - if err != nil { - return nil, err - } - // Use a zero address for local and remote address. - zeroAddr := &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - return &chanConn{ - Channel: ch, - laddr: zeroAddr, - raddr: zeroAddr, - }, nil - case "unix": - var err error - ch, err = c.dialStreamLocal(addr) - if err != nil { - return nil, err - } - return &chanConn{ - Channel: ch, - laddr: &net.UnixAddr{ - Name: "@", - Net: "unix", - }, - raddr: &net.UnixAddr{ - Name: addr, - Net: "unix", - }, - }, nil - default: - return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) - } -} - -// DialTCP connects to the remote address raddr on the network net, -// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used -// as the local address for the connection. -func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) { - if laddr == nil { - laddr = &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - } - ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port) - if err != nil { - return nil, err - } - return &chanConn{ - Channel: ch, - laddr: laddr, - raddr: raddr, - }, nil -} - -// RFC 4254 7.2 -type channelOpenDirectMsg struct { - raddr string - rport uint32 - laddr string - lport uint32 -} - -func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) { - msg := channelOpenDirectMsg{ - raddr: raddr, - rport: uint32(rport), - laddr: laddr, - lport: uint32(lport), - } - ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type tcpChan struct { - Channel // the backing channel -} - -// chanConn fulfills the net.Conn interface without -// the tcpChan having to hold laddr or raddr directly. -type chanConn struct { - Channel - laddr, raddr net.Addr -} - -// LocalAddr returns the local network address. -func (t *chanConn) LocalAddr() net.Addr { - return t.laddr -} - -// RemoteAddr returns the remote network address. -func (t *chanConn) RemoteAddr() net.Addr { - return t.raddr -} - -// SetDeadline sets the read and write deadlines associated -// with the connection. 
-func (t *chanConn) SetDeadline(deadline time.Time) error { - if err := t.SetReadDeadline(deadline); err != nil { - return err - } - return t.SetWriteDeadline(deadline) -} - -// SetReadDeadline sets the read deadline. -// A zero value for t means Read will not time out. -// After the deadline, the error from Read will implement net.Error -// with Timeout() == true. -func (t *chanConn) SetReadDeadline(deadline time.Time) error { - // for compatibility with previous version, - // the error message contains "tcpChan" - return errors.New("ssh: tcpChan: deadline not supported") -} - -// SetWriteDeadline exists to satisfy the net.Conn interface -// but is not implemented by this type. It always returns an error. -func (t *chanConn) SetWriteDeadline(deadline time.Time) error { - return errors.New("ssh: tcpChan: deadline not supported") -} diff --git a/vendor/golang.org/x/crypto/ssh/tcpip_test.go b/vendor/golang.org/x/crypto/ssh/tcpip_test.go deleted file mode 100644 index f1265cb4..00000000 --- a/vendor/golang.org/x/crypto/ssh/tcpip_test.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "testing" -) - -func TestAutoPortListenBroken(t *testing.T) { - broken := "SSH-2.0-OpenSSH_5.9hh11" - works := "SSH-2.0-OpenSSH_6.1" - if !isBrokenOpenSSHVersion(broken) { - t.Errorf("version %q not marked as broken", broken) - } - if isBrokenOpenSSHVersion(works) { - t.Errorf("version %q marked as broken", works) - } -} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/BUILD.bazel b/vendor/golang.org/x/crypto/ssh/terminal/BUILD.bazel deleted file mode 100644 index 4eba344d..00000000 --- a/vendor/golang.org/x/crypto/ssh/terminal/BUILD.bazel +++ /dev/null @@ -1,108 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "terminal.go", - ] + select({ - "@io_bazel_rules_go//go/platform:darwin": [ - "util.go", - "util_bsd.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "util.go", - "util_bsd.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "util.go", - "util_bsd.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "util.go", - "util_linux.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "util.go", - "util_bsd.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "util.go", - "util_bsd.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "util_plan9.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "util_solaris.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "util_windows.go", - ], - "//conditions:default": [], - }), - importpath = "golang.org/x/crypto/ssh/terminal", - visibility = ["//visibility:public"], - deps = select({ - "@io_bazel_rules_go//go/platform:darwin": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - 
"//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/golang.org/x/sys/windows:go_default_library", - ], - "//conditions:default": [], - }), -) - -go_test( - name = "go_default_test", - srcs = select({ - "@io_bazel_rules_go//go/platform:darwin": [ - "terminal_test.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "terminal_test.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "terminal_test.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "terminal_test.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "terminal_test.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "terminal_test.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "terminal_test.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "terminal_test.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "terminal_test.go", - ], - "//conditions:default": [], - }), - embed = [":go_default_library"], - importpath = "golang.org/x/crypto/ssh/terminal", -) diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go deleted file mode 100644 index d9b77c1c..00000000 --- a/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go +++ /dev/null @@ -1,358 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd windows plan9 solaris - -package terminal - -import ( - "bytes" - "io" - "os" - "runtime" - "testing" -) - -type MockTerminal struct { - toSend []byte - bytesPerRead int - received []byte -} - -func (c *MockTerminal) Read(data []byte) (n int, err error) { - n = len(data) - if n == 0 { - return - } - if n > len(c.toSend) { - n = len(c.toSend) - } - if n == 0 { - return 0, io.EOF - } - if c.bytesPerRead > 0 && n > c.bytesPerRead { - n = c.bytesPerRead - } - copy(data, c.toSend[:n]) - c.toSend = c.toSend[n:] - return -} - -func (c *MockTerminal) Write(data []byte) (n int, err error) { - c.received = append(c.received, data...) - return len(data), nil -} - -func TestClose(t *testing.T) { - c := &MockTerminal{} - ss := NewTerminal(c, "> ") - line, err := ss.ReadLine() - if line != "" { - t.Errorf("Expected empty line but got: %s", line) - } - if err != io.EOF { - t.Errorf("Error should have been EOF but got: %s", err) - } -} - -var keyPressTests = []struct { - in string - line string - err error - throwAwayLines int -}{ - { - err: io.EOF, - }, - { - in: "\r", - line: "", - }, - { - in: "foo\r", - line: "foo", - }, - { - in: "a\x1b[Cb\r", // right - line: "ab", - }, - { - in: "a\x1b[Db\r", // left - line: "ba", - }, - { - in: "a\177b\r", // backspace - line: "b", - }, - { - in: "\x1b[A\r", // up - }, - { - in: "\x1b[B\r", // down - }, - { - in: "line\x1b[A\x1b[B\r", // up then down - line: "line", - }, - { - in: "line1\rline2\x1b[A\r", // recall previous line. - line: "line1", - throwAwayLines: 1, - }, - { - // recall two previous lines and append. - in: "line1\rline2\rline3\x1b[A\x1b[Axxx\r", - line: "line1xxx", - throwAwayLines: 2, - }, - { - // Ctrl-A to move to beginning of line followed by ^K to kill - // line. - in: "a b \001\013\r", - line: "", - }, - { - // Ctrl-A to move to beginning of line, Ctrl-E to move to end, - // finally ^K to kill nothing. 
- in: "a b \001\005\013\r", - line: "a b ", - }, - { - in: "\027\r", - line: "", - }, - { - in: "a\027\r", - line: "", - }, - { - in: "a \027\r", - line: "", - }, - { - in: "a b\027\r", - line: "a ", - }, - { - in: "a b \027\r", - line: "a ", - }, - { - in: "one two thr\x1b[D\027\r", - line: "one two r", - }, - { - in: "\013\r", - line: "", - }, - { - in: "a\013\r", - line: "a", - }, - { - in: "ab\x1b[D\013\r", - line: "a", - }, - { - in: "Ξεσκεπάζω\r", - line: "Ξεσκεπάζω", - }, - { - in: "£\r\x1b[A\177\r", // non-ASCII char, enter, up, backspace. - line: "", - throwAwayLines: 1, - }, - { - in: "£\r££\x1b[A\x1b[B\177\r", // non-ASCII char, enter, 2x non-ASCII, up, down, backspace, enter. - line: "£", - throwAwayLines: 1, - }, - { - // Ctrl-D at the end of the line should be ignored. - in: "a\004\r", - line: "a", - }, - { - // a, b, left, Ctrl-D should erase the b. - in: "ab\x1b[D\004\r", - line: "a", - }, - { - // a, b, c, d, left, left, ^U should erase to the beginning of - // the line. - in: "abcd\x1b[D\x1b[D\025\r", - line: "cd", - }, - { - // Bracketed paste mode: control sequences should be returned - // verbatim in paste mode. - in: "abc\x1b[200~de\177f\x1b[201~\177\r", - line: "abcde\177", - }, - { - // Enter in bracketed paste mode should still work. - in: "abc\x1b[200~d\refg\x1b[201~h\r", - line: "efgh", - throwAwayLines: 1, - }, - { - // Lines consisting entirely of pasted data should be indicated as such. - in: "\x1b[200~a\r", - line: "a", - err: ErrPasteIndicator, - }, -} - -func TestKeyPresses(t *testing.T) { - for i, test := range keyPressTests { - for j := 1; j < len(test.in); j++ { - c := &MockTerminal{ - toSend: []byte(test.in), - bytesPerRead: j, - } - ss := NewTerminal(c, "> ") - for k := 0; k < test.throwAwayLines; k++ { - _, err := ss.ReadLine() - if err != nil { - t.Errorf("Throwaway line %d from test %d resulted in error: %s", k, i, err) - } - } - line, err := ss.ReadLine() - if line != test.line { - t.Errorf("Line resulting from test %d (%d bytes per read) was '%s', expected '%s'", i, j, line, test.line) - break - } - if err != test.err { - t.Errorf("Error resulting from test %d (%d bytes per read) was '%v', expected '%v'", i, j, err, test.err) - break - } - } - } -} - -func TestPasswordNotSaved(t *testing.T) { - c := &MockTerminal{ - toSend: []byte("password\r\x1b[A\r"), - bytesPerRead: 1, - } - ss := NewTerminal(c, "> ") - pw, _ := ss.ReadPassword("> ") - if pw != "password" { - t.Fatalf("failed to read password, got %s", pw) - } - line, _ := ss.ReadLine() - if len(line) > 0 { - t.Fatalf("password was saved in history") - } -} - -var setSizeTests = []struct { - width, height int -}{ - {40, 13}, - {80, 24}, - {132, 43}, -} - -func TestTerminalSetSize(t *testing.T) { - for _, setSize := range setSizeTests { - c := &MockTerminal{ - toSend: []byte("password\r\x1b[A\r"), - bytesPerRead: 1, - } - ss := NewTerminal(c, "> ") - ss.SetSize(setSize.width, setSize.height) - pw, _ := ss.ReadPassword("Password: ") - if pw != "password" { - t.Fatalf("failed to read password, got %s", pw) - } - if string(c.received) != "Password: \r\n" { - t.Errorf("failed to set the temporary prompt expected %q, got %q", "Password: ", c.received) - } - } -} - -func TestReadPasswordLineEnd(t *testing.T) { - var tests = []struct { - input string - want string - }{ - {"\n", ""}, - {"\r\n", ""}, - {"test\r\n", "test"}, - {"testtesttesttes\n", "testtesttesttes"}, - {"testtesttesttes\r\n", "testtesttesttes"}, - {"testtesttesttesttest\n", "testtesttesttesttest"}, - {"testtesttesttesttest\r\n", 
"testtesttesttesttest"}, - } - for _, test := range tests { - buf := new(bytes.Buffer) - if _, err := buf.WriteString(test.input); err != nil { - t.Fatal(err) - } - - have, err := readPasswordLine(buf) - if err != nil { - t.Errorf("readPasswordLine(%q) failed: %v", test.input, err) - continue - } - if string(have) != test.want { - t.Errorf("readPasswordLine(%q) returns %q, but %q is expected", test.input, string(have), test.want) - continue - } - - if _, err = buf.WriteString(test.input); err != nil { - t.Fatal(err) - } - have, err = readPasswordLine(buf) - if err != nil { - t.Errorf("readPasswordLine(%q) failed: %v", test.input, err) - continue - } - if string(have) != test.want { - t.Errorf("readPasswordLine(%q) returns %q, but %q is expected", test.input, string(have), test.want) - continue - } - } -} - -func TestMakeRawState(t *testing.T) { - fd := int(os.Stdout.Fd()) - if !IsTerminal(fd) { - t.Skip("stdout is not a terminal; skipping test") - } - - st, err := GetState(fd) - if err != nil { - t.Fatalf("failed to get terminal state from GetState: %s", err) - } - - if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") { - t.Skip("MakeRaw not allowed on iOS; skipping test") - } - - defer Restore(fd, st) - raw, err := MakeRaw(fd) - if err != nil { - t.Fatalf("failed to get terminal state from MakeRaw: %s", err) - } - - if *st != *raw { - t.Errorf("states do not match; was %v, expected %v", raw, st) - } -} - -func TestOutputNewlines(t *testing.T) { - // \n should be changed to \r\n in terminal output. - buf := new(bytes.Buffer) - term := NewTerminal(buf, ">") - - term.Write([]byte("1\n2\n")) - output := string(buf.Bytes()) - const expected = "1\r\n2\r\n" - - if output != expected { - t.Errorf("incorrect output: was %q, expected %q", output, expected) - } -} diff --git a/vendor/golang.org/x/crypto/ssh/testdata_test.go b/vendor/golang.org/x/crypto/ssh/testdata_test.go deleted file mode 100644 index 2da8c79d..00000000 --- a/vendor/golang.org/x/crypto/ssh/testdata_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// IMPLEMENTATION NOTE: To avoid a package loop, this file is in three places: -// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three -// instances. - -package ssh - -import ( - "crypto/rand" - "fmt" - - "golang.org/x/crypto/ssh/testdata" -) - -var ( - testPrivateKeys map[string]interface{} - testSigners map[string]Signer - testPublicKeys map[string]PublicKey -) - -func init() { - var err error - - n := len(testdata.PEMBytes) - testPrivateKeys = make(map[string]interface{}, n) - testSigners = make(map[string]Signer, n) - testPublicKeys = make(map[string]PublicKey, n) - for t, k := range testdata.PEMBytes { - testPrivateKeys[t], err = ParseRawPrivateKey(k) - if err != nil { - panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err)) - } - testSigners[t], err = NewSignerFromKey(testPrivateKeys[t]) - if err != nil { - panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err)) - } - testPublicKeys[t] = testSigners[t].PublicKey() - } - - // Create a cert and sign it for use in tests. 
- testCert := &Certificate{ - Nonce: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil - ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage - ValidAfter: 0, // unix epoch - ValidBefore: CertTimeInfinity, // The end of currently representable time. - Reserved: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil - Key: testPublicKeys["ecdsa"], - SignatureKey: testPublicKeys["rsa"], - Permissions: Permissions{ - CriticalOptions: map[string]string{}, - Extensions: map[string]string{}, - }, - } - testCert.SignCert(rand.Reader, testSigners["rsa"]) - testPrivateKeys["cert"] = testPrivateKeys["ecdsa"] - testSigners["cert"], err = NewCertSigner(testCert, testSigners["ecdsa"]) - if err != nil { - panic(fmt.Sprintf("Unable to create certificate signer: %v", err)) - } -} diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go deleted file mode 100644 index f6fae1db..00000000 --- a/vendor/golang.org/x/crypto/ssh/transport.go +++ /dev/null @@ -1,353 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bufio" - "bytes" - "errors" - "io" - "log" -) - -// debugTransport if set, will print packet types as they go over the -// wire. No message decoding is done, to minimize the impact on timing. -const debugTransport = false - -const ( - gcmCipherID = "aes128-gcm@openssh.com" - aes128cbcID = "aes128-cbc" - tripledescbcID = "3des-cbc" -) - -// packetConn represents a transport that implements packet based -// operations. -type packetConn interface { - // Encrypt and send a packet of data to the remote peer. - writePacket(packet []byte) error - - // Read a packet from the connection. The read is blocking, - // i.e. if error is nil, then the returned byte slice is - // always non-empty. - readPacket() ([]byte, error) - - // Close closes the write-side of the connection. - Close() error -} - -// transport is the keyingTransport that implements the SSH packet -// protocol. -type transport struct { - reader connectionState - writer connectionState - - bufReader *bufio.Reader - bufWriter *bufio.Writer - rand io.Reader - isClient bool - io.Closer -} - -// packetCipher represents a combination of SSH encryption/MAC -// protocol. A single instance should be used for one direction only. -type packetCipher interface { - // writePacket encrypts the packet and writes it to w. The - // contents of the packet are generally scrambled. - writePacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error - - // readPacket reads and decrypts a packet of data. The - // returned packet may be overwritten by future calls of - // readPacket. - readPacket(seqnum uint32, r io.Reader) ([]byte, error) -} - -// connectionState represents one side (read or write) of the -// connection. This is necessary because each direction has its own -// keys, and can even have its own algorithms -type connectionState struct { - packetCipher - seqNum uint32 - dir direction - pendingKeyChange chan packetCipher -} - -// prepareKeyChange sets up key material for a keychange. The key changes in -// both directions are triggered by reading and writing a msgNewKey packet -// respectively. 
-func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { - ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult) - if err != nil { - return err - } - t.reader.pendingKeyChange <- ciph - - ciph, err = newPacketCipher(t.writer.dir, algs.w, kexResult) - if err != nil { - return err - } - t.writer.pendingKeyChange <- ciph - - return nil -} - -func (t *transport) printPacket(p []byte, write bool) { - if len(p) == 0 { - return - } - who := "server" - if t.isClient { - who = "client" - } - what := "read" - if write { - what = "write" - } - - log.Println(what, who, p[0]) -} - -// Read and decrypt next packet. -func (t *transport) readPacket() (p []byte, err error) { - for { - p, err = t.reader.readPacket(t.bufReader) - if err != nil { - break - } - if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { - break - } - } - if debugTransport { - t.printPacket(p, false) - } - - return p, err -} - -func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { - packet, err := s.packetCipher.readPacket(s.seqNum, r) - s.seqNum++ - if err == nil && len(packet) == 0 { - err = errors.New("ssh: zero length packet") - } - - if len(packet) > 0 { - switch packet[0] { - case msgNewKeys: - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - return nil, errors.New("ssh: got bogus newkeys message") - } - - case msgDisconnect: - // Transform a disconnect message into an - // error. Since this is lowest level at which - // we interpret message types, doing it here - // ensures that we don't have to handle it - // elsewhere. - var msg disconnectMsg - if err := Unmarshal(packet, &msg); err != nil { - return nil, err - } - return nil, &msg - } - } - - // The packet may point to an internal buffer, so copy the - // packet out here. 
- fresh := make([]byte, len(packet)) - copy(fresh, packet) - - return fresh, err -} - -func (t *transport) writePacket(packet []byte) error { - if debugTransport { - t.printPacket(packet, true) - } - return t.writer.writePacket(t.bufWriter, t.rand, packet) -} - -func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error { - changeKeys := len(packet) > 0 && packet[0] == msgNewKeys - - err := s.packetCipher.writePacket(s.seqNum, w, rand, packet) - if err != nil { - return err - } - if err = w.Flush(); err != nil { - return err - } - s.seqNum++ - if changeKeys { - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - panic("ssh: no key material for msgNewKeys") - } - } - return err -} - -func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport { - t := &transport{ - bufReader: bufio.NewReader(rwc), - bufWriter: bufio.NewWriter(rwc), - rand: rand, - reader: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - writer: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - Closer: rwc, - } - t.isClient = isClient - - if isClient { - t.reader.dir = serverKeys - t.writer.dir = clientKeys - } else { - t.reader.dir = clientKeys - t.writer.dir = serverKeys - } - - return t -} - -type direction struct { - ivTag []byte - keyTag []byte - macKeyTag []byte -} - -var ( - serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}} - clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}} -) - -// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as -// described in RFC 4253, section 6.4. direction should either be serverKeys -// (to setup server->client keys) or clientKeys (for client->server keys). -func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { - cipherMode := cipherModes[algs.Cipher] - macMode := macModes[algs.MAC] - - iv := make([]byte, cipherMode.ivSize) - key := make([]byte, cipherMode.keySize) - macKey := make([]byte, macMode.keySize) - - generateKeyMaterial(iv, d.ivTag, kex) - generateKeyMaterial(key, d.keyTag, kex) - generateKeyMaterial(macKey, d.macKeyTag, kex) - - return cipherModes[algs.Cipher].create(key, iv, macKey, algs) -} - -// generateKeyMaterial fills out with key material generated from tag, K, H -// and sessionId, as specified in RFC 4253, section 7.2. -func generateKeyMaterial(out, tag []byte, r *kexResult) { - var digestsSoFar []byte - - h := r.Hash.New() - for len(out) > 0 { - h.Reset() - h.Write(r.K) - h.Write(r.H) - - if len(digestsSoFar) == 0 { - h.Write(tag) - h.Write(r.SessionID) - } else { - h.Write(digestsSoFar) - } - - digest := h.Sum(nil) - n := copy(out, digest) - out = out[n:] - if len(out) > 0 { - digestsSoFar = append(digestsSoFar, digest...) - } - } -} - -const packageVersion = "SSH-2.0-Go" - -// Sends and receives a version line. The versionLine string should -// be US ASCII, start with "SSH-2.0-", and should not include a -// newline. exchangeVersions returns the other side's version line. -func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) { - // Contrary to the RFC, we do not ignore lines that don't - // start with "SSH-2.0-" to make the library usable with - // nonconforming servers. - for _, c := range versionLine { - // The spec disallows non US-ASCII chars, and - // specifically forbids null chars. 
- if c < 32 { - return nil, errors.New("ssh: junk character in version line") - } - } - if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil { - return - } - - them, err = readVersion(rw) - return them, err -} - -// maxVersionStringBytes is the maximum number of bytes that we'll -// accept as a version string. RFC 4253 section 4.2 limits this at 255 -// chars -const maxVersionStringBytes = 255 - -// Read version string as specified by RFC 4253, section 4.2. -func readVersion(r io.Reader) ([]byte, error) { - versionString := make([]byte, 0, 64) - var ok bool - var buf [1]byte - - for length := 0; length < maxVersionStringBytes; length++ { - _, err := io.ReadFull(r, buf[:]) - if err != nil { - return nil, err - } - // The RFC says that the version should be terminated with \r\n - // but several SSH servers actually only send a \n. - if buf[0] == '\n' { - if !bytes.HasPrefix(versionString, []byte("SSH-")) { - // RFC 4253 says we need to ignore all version string lines - // except the one containing the SSH version (provided that - // all the lines do not exceed 255 bytes in total). - versionString = versionString[:0] - continue - } - ok = true - break - } - - // non ASCII chars are disallowed, but we are lenient, - // since Go doesn't use null-terminated strings. - - // The RFC allows a comment after a space, however, - // all of it (version and comments) goes into the - // session hash. - versionString = append(versionString, buf[0]) - } - - if !ok { - return nil, errors.New("ssh: overflow reading version string") - } - - // There might be a '\r' on the end which we should remove. - if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' { - versionString = versionString[:len(versionString)-1] - } - return versionString, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/transport_test.go b/vendor/golang.org/x/crypto/ssh/transport_test.go deleted file mode 100644 index 8445e1e5..00000000 --- a/vendor/golang.org/x/crypto/ssh/transport_test.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ssh - -import ( - "bytes" - "crypto/rand" - "encoding/binary" - "strings" - "testing" -) - -func TestReadVersion(t *testing.T) { - longVersion := strings.Repeat("SSH-2.0-bla", 50)[:253] - multiLineVersion := strings.Repeat("ignored\r\n", 20) + "SSH-2.0-bla\r\n" - cases := map[string]string{ - "SSH-2.0-bla\r\n": "SSH-2.0-bla", - "SSH-2.0-bla\n": "SSH-2.0-bla", - multiLineVersion: "SSH-2.0-bla", - longVersion + "\r\n": longVersion, - } - - for in, want := range cases { - result, err := readVersion(bytes.NewBufferString(in)) - if err != nil { - t.Errorf("readVersion(%q): %s", in, err) - } - got := string(result) - if got != want { - t.Errorf("got %q, want %q", got, want) - } - } -} - -func TestReadVersionError(t *testing.T) { - longVersion := strings.Repeat("SSH-2.0-bla", 50)[:253] - multiLineVersion := strings.Repeat("ignored\r\n", 50) + "SSH-2.0-bla\r\n" - cases := []string{ - longVersion + "too-long\r\n", - multiLineVersion, - } - for _, in := range cases { - if _, err := readVersion(bytes.NewBufferString(in)); err == nil { - t.Errorf("readVersion(%q) should have failed", in) - } - } -} - -func TestExchangeVersionsBasic(t *testing.T) { - v := "SSH-2.0-bla" - buf := bytes.NewBufferString(v + "\r\n") - them, err := exchangeVersions(buf, []byte("xyz")) - if err != nil { - t.Errorf("exchangeVersions: %v", err) - } - - if want := "SSH-2.0-bla"; string(them) != want { - t.Errorf("got %q want %q for our version", them, want) - } -} - -func TestExchangeVersions(t *testing.T) { - cases := []string{ - "not\x000allowed", - "not allowed\x01\r\n", - } - for _, c := range cases { - buf := bytes.NewBufferString("SSH-2.0-bla\r\n") - if _, err := exchangeVersions(buf, []byte(c)); err == nil { - t.Errorf("exchangeVersions(%q): should have failed", c) - } - } -} - -type closerBuffer struct { - bytes.Buffer -} - -func (b *closerBuffer) Close() error { - return nil -} - -func TestTransportMaxPacketWrite(t *testing.T) { - buf := &closerBuffer{} - tr := newTransport(buf, rand.Reader, true) - huge := make([]byte, maxPacket+1) - err := tr.writePacket(huge) - if err == nil { - t.Errorf("transport accepted write for a huge packet.") - } -} - -func TestTransportMaxPacketReader(t *testing.T) { - var header [5]byte - huge := make([]byte, maxPacket+128) - binary.BigEndian.PutUint32(header[0:], uint32(len(huge))) - // padding. - header[4] = 0 - - buf := &closerBuffer{} - buf.Write(header[:]) - buf.Write(huge) - - tr := newTransport(buf, rand.Reader, true) - _, err := tr.readPacket() - if err == nil { - t.Errorf("transport succeeded reading huge packet.") - } else if !strings.Contains(err.Error(), "large") { - t.Errorf("got %q, should mention %q", err.Error(), "large") - } -} diff --git a/vendor/golang.org/x/net/.gitattributes b/vendor/golang.org/x/net/.gitattributes deleted file mode 100644 index d2f212e5..00000000 --- a/vendor/golang.org/x/net/.gitattributes +++ /dev/null @@ -1,10 +0,0 @@ -# Treat all files in this repo as binary, with no git magic updating -# line endings. Windows users contributing to Go will need to use a -# modern version of git and editors capable of LF line endings. -# -# We'll prevent accidental CRLF line endings from entering the repo -# via the git-review gofmt checks. 
-# -# See golang.org/issue/9281 - -* -text diff --git a/vendor/golang.org/x/net/.gitignore b/vendor/golang.org/x/net/.gitignore deleted file mode 100644 index 8339fd61..00000000 --- a/vendor/golang.org/x/net/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# Add no patterns to .hgignore except for files generated by the build. -last-change diff --git a/vendor/golang.org/x/net/CONTRIBUTING.md b/vendor/golang.org/x/net/CONTRIBUTING.md deleted file mode 100644 index d0485e88..00000000 --- a/vendor/golang.org/x/net/CONTRIBUTING.md +++ /dev/null @@ -1,26 +0,0 @@ -# Contributing to Go - -Go is an open source project. - -It is the work of hundreds of contributors. We appreciate your help! - -## Filing issues - -When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. -The gophers there will answer or ask you to file an issue if you've tripped over a bug. - -## Contributing code - -Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) -before sending patches. - -Unless otherwise noted, the Go source files are distributed under -the BSD-style license found in the LICENSE file. diff --git a/vendor/golang.org/x/net/README.md b/vendor/golang.org/x/net/README.md deleted file mode 100644 index 00a9b6eb..00000000 --- a/vendor/golang.org/x/net/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# Go Networking - -This repository holds supplementary Go networking libraries. - -## Download/Install - -The easiest way to install is to run `go get -u golang.org/x/net`. You can -also manually git clone the repository to `$GOPATH/src/golang.org/x/net`. - -## Report Issues / Send Patches - -This repository uses Gerrit for code changes. To learn how to submit -changes to this repository, see https://golang.org/doc/contribute.html. -The main issue tracker for the net repository is located at -https://github.com/golang/go/issues. Prefix your issue with "x/net:" in the -subject line, so it is easy to find. 
diff --git a/vendor/golang.org/x/net/codereview.cfg b/vendor/golang.org/x/net/codereview.cfg deleted file mode 100644 index 3f8b14b6..00000000 --- a/vendor/golang.org/x/net/codereview.cfg +++ /dev/null @@ -1 +0,0 @@ -issuerepo: golang/go diff --git a/vendor/golang.org/x/net/context/BUILD.bazel b/vendor/golang.org/x/net/context/BUILD.bazel deleted file mode 100644 index 0dacb288..00000000 --- a/vendor/golang.org/x/net/context/BUILD.bazel +++ /dev/null @@ -1,28 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "context.go", - "go17.go", - "go19.go", - "pre_go17.go", - "pre_go19.go", - ], - importpath = "golang.org/x/net/context", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = ["context_test.go"], - embed = [":go_default_library"], - importpath = "golang.org/x/net/context", -) - -go_test( - name = "go_default_xtest", - srcs = ["withtimeout_test.go"], - importpath = "golang.org/x/net/context_test", - deps = [":go_default_library"], -) diff --git a/vendor/golang.org/x/net/context/context_test.go b/vendor/golang.org/x/net/context/context_test.go deleted file mode 100644 index 62844131..00000000 --- a/vendor/golang.org/x/net/context/context_test.go +++ /dev/null @@ -1,583 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package context - -import ( - "fmt" - "math/rand" - "runtime" - "strings" - "sync" - "testing" - "time" -) - -// otherContext is a Context that's not one of the types defined in context.go. -// This lets us test code paths that differ based on the underlying type of the -// Context. 
-type otherContext struct { - Context -} - -func TestBackground(t *testing.T) { - c := Background() - if c == nil { - t.Fatalf("Background returned nil") - } - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - if got, want := fmt.Sprint(c), "context.Background"; got != want { - t.Errorf("Background().String() = %q want %q", got, want) - } -} - -func TestTODO(t *testing.T) { - c := TODO() - if c == nil { - t.Fatalf("TODO returned nil") - } - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - if got, want := fmt.Sprint(c), "context.TODO"; got != want { - t.Errorf("TODO().String() = %q want %q", got, want) - } -} - -func TestWithCancel(t *testing.T) { - c1, cancel := WithCancel(Background()) - - if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want { - t.Errorf("c1.String() = %q want %q", got, want) - } - - o := otherContext{c1} - c2, _ := WithCancel(o) - contexts := []Context{c1, o, c2} - - for i, c := range contexts { - if d := c.Done(); d == nil { - t.Errorf("c[%d].Done() == %v want non-nil", i, d) - } - if e := c.Err(); e != nil { - t.Errorf("c[%d].Err() == %v want nil", i, e) - } - - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - } - - cancel() - time.Sleep(100 * time.Millisecond) // let cancelation propagate - - for i, c := range contexts { - select { - case <-c.Done(): - default: - t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i) - } - if e := c.Err(); e != Canceled { - t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled) - } - } -} - -func TestParentFinishesChild(t *testing.T) { - // Context tree: - // parent -> cancelChild - // parent -> valueChild -> timerChild - parent, cancel := WithCancel(Background()) - cancelChild, stop := WithCancel(parent) - defer stop() - valueChild := WithValue(parent, "key", "value") - timerChild, stop := WithTimeout(valueChild, 10000*time.Hour) - defer stop() - - select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - case x := <-cancelChild.Done(): - t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x) - case x := <-timerChild.Done(): - t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x) - case x := <-valueChild.Done(): - t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x) - default: - } - - // The parent's children should contain the two cancelable children. - pc := parent.(*cancelCtx) - cc := cancelChild.(*cancelCtx) - tc := timerChild.(*timerCtx) - pc.mu.Lock() - if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] { - t.Errorf("bad linkage: pc.children = %v, want %v and %v", - pc.children, cc, tc) - } - pc.mu.Unlock() - - if p, ok := parentCancelCtx(cc.Context); !ok || p != pc { - t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc) - } - if p, ok := parentCancelCtx(tc.Context); !ok || p != pc { - t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc) - } - - cancel() - - pc.mu.Lock() - if len(pc.children) != 0 { - t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children) - } - pc.mu.Unlock() - - // parent and children should all be finished. 
- check := func(ctx Context, name string) { - select { - case <-ctx.Done(): - default: - t.Errorf("<-%s.Done() blocked, but shouldn't have", name) - } - if e := ctx.Err(); e != Canceled { - t.Errorf("%s.Err() == %v want %v", name, e, Canceled) - } - } - check(parent, "parent") - check(cancelChild, "cancelChild") - check(valueChild, "valueChild") - check(timerChild, "timerChild") - - // WithCancel should return a canceled context on a canceled parent. - precanceledChild := WithValue(parent, "key", "value") - select { - case <-precanceledChild.Done(): - default: - t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have") - } - if e := precanceledChild.Err(); e != Canceled { - t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled) - } -} - -func TestChildFinishesFirst(t *testing.T) { - cancelable, stop := WithCancel(Background()) - defer stop() - for _, parent := range []Context{Background(), cancelable} { - child, cancel := WithCancel(parent) - - select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - case x := <-child.Done(): - t.Errorf("<-child.Done() == %v want nothing (it should block)", x) - default: - } - - cc := child.(*cancelCtx) - pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background() - if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) { - t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok) - } - - if pcok { - pc.mu.Lock() - if len(pc.children) != 1 || !pc.children[cc] { - t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc) - } - pc.mu.Unlock() - } - - cancel() - - if pcok { - pc.mu.Lock() - if len(pc.children) != 0 { - t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children) - } - pc.mu.Unlock() - } - - // child should be finished. - select { - case <-child.Done(): - default: - t.Errorf("<-child.Done() blocked, but shouldn't have") - } - if e := child.Err(); e != Canceled { - t.Errorf("child.Err() == %v want %v", e, Canceled) - } - - // parent should not be finished. 
- select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - default: - } - if e := parent.Err(); e != nil { - t.Errorf("parent.Err() == %v want nil", e) - } - } -} - -func testDeadline(c Context, wait time.Duration, t *testing.T) { - select { - case <-time.After(wait): - t.Fatalf("context should have timed out") - case <-c.Done(): - } - if e := c.Err(); e != DeadlineExceeded { - t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded) - } -} - -func TestDeadline(t *testing.T) { - t.Parallel() - const timeUnit = 500 * time.Millisecond - c, _ := WithDeadline(Background(), time.Now().Add(1*timeUnit)) - if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { - t.Errorf("c.String() = %q want prefix %q", got, prefix) - } - testDeadline(c, 2*timeUnit, t) - - c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit)) - o := otherContext{c} - testDeadline(o, 2*timeUnit, t) - - c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit)) - o = otherContext{c} - c, _ = WithDeadline(o, time.Now().Add(3*timeUnit)) - testDeadline(c, 2*timeUnit, t) -} - -func TestTimeout(t *testing.T) { - t.Parallel() - const timeUnit = 500 * time.Millisecond - c, _ := WithTimeout(Background(), 1*timeUnit) - if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { - t.Errorf("c.String() = %q want prefix %q", got, prefix) - } - testDeadline(c, 2*timeUnit, t) - - c, _ = WithTimeout(Background(), 1*timeUnit) - o := otherContext{c} - testDeadline(o, 2*timeUnit, t) - - c, _ = WithTimeout(Background(), 1*timeUnit) - o = otherContext{c} - c, _ = WithTimeout(o, 3*timeUnit) - testDeadline(c, 2*timeUnit, t) -} - -func TestCanceledTimeout(t *testing.T) { - t.Parallel() - const timeUnit = 500 * time.Millisecond - c, _ := WithTimeout(Background(), 2*timeUnit) - o := otherContext{c} - c, cancel := WithTimeout(o, 4*timeUnit) - cancel() - time.Sleep(1 * timeUnit) // let cancelation propagate - select { - case <-c.Done(): - default: - t.Errorf("<-c.Done() blocked, but shouldn't have") - } - if e := c.Err(); e != Canceled { - t.Errorf("c.Err() == %v want %v", e, Canceled) - } -} - -type key1 int -type key2 int - -var k1 = key1(1) -var k2 = key2(1) // same int as k1, different type -var k3 = key2(3) // same type as k2, different int - -func TestValues(t *testing.T) { - check := func(c Context, nm, v1, v2, v3 string) { - if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 { - t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0) - } - if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 { - t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0) - } - if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 { - t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0) - } - } - - c0 := Background() - check(c0, "c0", "", "", "") - - c1 := WithValue(Background(), k1, "c1k1") - check(c1, "c1", "c1k1", "", "") - - if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want { - t.Errorf("c.String() = %q want %q", got, want) - } - - c2 := WithValue(c1, k2, "c2k2") - check(c2, "c2", "c1k1", "c2k2", "") - - c3 := WithValue(c2, k3, "c3k3") - check(c3, "c2", "c1k1", "c2k2", "c3k3") - - c4 := WithValue(c3, k1, nil) - check(c4, "c4", "", "c2k2", "c3k3") - - o0 := otherContext{Background()} - check(o0, "o0", "", "", "") - - o1 := 
otherContext{WithValue(Background(), k1, "c1k1")} - check(o1, "o1", "c1k1", "", "") - - o2 := WithValue(o1, k2, "o2k2") - check(o2, "o2", "c1k1", "o2k2", "") - - o3 := otherContext{c4} - check(o3, "o3", "", "c2k2", "c3k3") - - o4 := WithValue(o3, k3, nil) - check(o4, "o4", "", "c2k2", "") -} - -func TestAllocs(t *testing.T) { - bg := Background() - for _, test := range []struct { - desc string - f func() - limit float64 - gccgoLimit float64 - }{ - { - desc: "Background()", - f: func() { Background() }, - limit: 0, - gccgoLimit: 0, - }, - { - desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1), - f: func() { - c := WithValue(bg, k1, nil) - c.Value(k1) - }, - limit: 3, - gccgoLimit: 3, - }, - { - desc: "WithTimeout(bg, 15*time.Millisecond)", - f: func() { - c, _ := WithTimeout(bg, 15*time.Millisecond) - <-c.Done() - }, - limit: 8, - gccgoLimit: 16, - }, - { - desc: "WithCancel(bg)", - f: func() { - c, cancel := WithCancel(bg) - cancel() - <-c.Done() - }, - limit: 5, - gccgoLimit: 8, - }, - { - desc: "WithTimeout(bg, 100*time.Millisecond)", - f: func() { - c, cancel := WithTimeout(bg, 100*time.Millisecond) - cancel() - <-c.Done() - }, - limit: 8, - gccgoLimit: 25, - }, - } { - limit := test.limit - if runtime.Compiler == "gccgo" { - // gccgo does not yet do escape analysis. - // TODO(iant): Remove this when gccgo does do escape analysis. - limit = test.gccgoLimit - } - if n := testing.AllocsPerRun(100, test.f); n > limit { - t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit)) - } - } -} - -func TestSimultaneousCancels(t *testing.T) { - root, cancel := WithCancel(Background()) - m := map[Context]CancelFunc{root: cancel} - q := []Context{root} - // Create a tree of contexts. - for len(q) != 0 && len(m) < 100 { - parent := q[0] - q = q[1:] - for i := 0; i < 4; i++ { - ctx, cancel := WithCancel(parent) - m[ctx] = cancel - q = append(q, ctx) - } - } - // Start all the cancels in a random order. - var wg sync.WaitGroup - wg.Add(len(m)) - for _, cancel := range m { - go func(cancel CancelFunc) { - cancel() - wg.Done() - }(cancel) - } - // Wait on all the contexts in a random order. - for ctx := range m { - select { - case <-ctx.Done(): - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n]) - } - } - // Wait for all the cancel functions to return. - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - select { - case <-done: - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n]) - } -} - -func TestInterlockedCancels(t *testing.T) { - parent, cancelParent := WithCancel(Background()) - child, cancelChild := WithCancel(parent) - go func() { - parent.Done() - cancelChild() - }() - cancelParent() - select { - case <-child.Done(): - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n]) - } -} - -func TestLayersCancel(t *testing.T) { - testLayers(t, time.Now().UnixNano(), false) -} - -func TestLayersTimeout(t *testing.T) { - testLayers(t, time.Now().UnixNano(), true) -} - -func testLayers(t *testing.T, seed int64, testTimeout bool) { - rand.Seed(seed) - errorf := func(format string, a ...interface{}) { - t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...) 
- } - const ( - timeout = 200 * time.Millisecond - minLayers = 30 - ) - type value int - var ( - vals []*value - cancels []CancelFunc - numTimers int - ctx = Background() - ) - for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ { - switch rand.Intn(3) { - case 0: - v := new(value) - ctx = WithValue(ctx, v, v) - vals = append(vals, v) - case 1: - var cancel CancelFunc - ctx, cancel = WithCancel(ctx) - cancels = append(cancels, cancel) - case 2: - var cancel CancelFunc - ctx, cancel = WithTimeout(ctx, timeout) - cancels = append(cancels, cancel) - numTimers++ - } - } - checkValues := func(when string) { - for _, key := range vals { - if val := ctx.Value(key).(*value); key != val { - errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key) - } - } - } - select { - case <-ctx.Done(): - errorf("ctx should not be canceled yet") - default: - } - if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) { - t.Errorf("ctx.String() = %q want prefix %q", s, prefix) - } - t.Log(ctx) - checkValues("before cancel") - if testTimeout { - select { - case <-ctx.Done(): - case <-time.After(timeout + 100*time.Millisecond): - errorf("ctx should have timed out") - } - checkValues("after timeout") - } else { - cancel := cancels[rand.Intn(len(cancels))] - cancel() - select { - case <-ctx.Done(): - default: - errorf("ctx should be canceled") - } - checkValues("after cancel") - } -} - -func TestCancelRemoves(t *testing.T) { - checkChildren := func(when string, ctx Context, want int) { - if got := len(ctx.(*cancelCtx).children); got != want { - t.Errorf("%s: context has %d children, want %d", when, got, want) - } - } - - ctx, _ := WithCancel(Background()) - checkChildren("after creation", ctx, 0) - _, cancel := WithCancel(ctx) - checkChildren("with WithCancel child ", ctx, 1) - cancel() - checkChildren("after cancelling WithCancel child", ctx, 0) - - ctx, _ = WithCancel(Background()) - checkChildren("after creation", ctx, 0) - _, cancel = WithTimeout(ctx, 60*time.Minute) - checkChildren("with WithTimeout child ", ctx, 1) - cancel() - checkChildren("after cancelling WithTimeout child", ctx, 0) -} diff --git a/vendor/golang.org/x/net/context/ctxhttp/BUILD.bazel b/vendor/golang.org/x/net/context/ctxhttp/BUILD.bazel deleted file mode 100644 index 7df92491..00000000 --- a/vendor/golang.org/x/net/context/ctxhttp/BUILD.bazel +++ /dev/null @@ -1,104 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "ctxhttp.go", - "ctxhttp_pre17.go", - ], - importpath = "golang.org/x/net/context/ctxhttp", - visibility = ["//visibility:public"], - deps = ["//vendor/golang.org/x/net/context:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = select({ - "@io_bazel_rules_go//go/platform:android": [ - "ctxhttp_17_test.go", - "ctxhttp_pre17_test.go", - "ctxhttp_test.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "ctxhttp_17_test.go", - "ctxhttp_pre17_test.go", - "ctxhttp_test.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "ctxhttp_17_test.go", - "ctxhttp_pre17_test.go", - "ctxhttp_test.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "ctxhttp_17_test.go", - "ctxhttp_pre17_test.go", - "ctxhttp_test.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "ctxhttp_17_test.go", - "ctxhttp_pre17_test.go", - "ctxhttp_test.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "ctxhttp_17_test.go", - "ctxhttp_pre17_test.go", - "ctxhttp_test.go", 
- ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "ctxhttp_17_test.go", - "ctxhttp_pre17_test.go", - "ctxhttp_test.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "ctxhttp_17_test.go", - "ctxhttp_pre17_test.go", - "ctxhttp_test.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "ctxhttp_17_test.go", - "ctxhttp_pre17_test.go", - "ctxhttp_test.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "ctxhttp_17_test.go", - "ctxhttp_pre17_test.go", - "ctxhttp_test.go", - ], - "//conditions:default": [], - }), - embed = [":go_default_library"], - importpath = "golang.org/x/net/context/ctxhttp", - deps = select({ - "@io_bazel_rules_go//go/platform:android": [ - "//vendor/golang.org/x/net/context:go_default_library", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "//vendor/golang.org/x/net/context:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//vendor/golang.org/x/net/context:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/golang.org/x/net/context:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/golang.org/x/net/context:go_default_library", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "//vendor/golang.org/x/net/context:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//vendor/golang.org/x/net/context:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//vendor/golang.org/x/net/context:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "//vendor/golang.org/x/net/context:go_default_library", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/golang.org/x/net/context:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go deleted file mode 100644 index 72411b1b..00000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9,go1.7 - -package ctxhttp - -import ( - "io" - "net/http" - "net/http/httptest" - "testing" - - "context" -) - -func TestGo17Context(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, "ok") - })) - defer ts.Close() - ctx := context.Background() - resp, err := Get(ctx, http.DefaultClient, ts.URL) - if resp == nil || err != nil { - t.Fatalf("error received from client: %v %v", err, resp) - } - resp.Body.Close() -} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go deleted file mode 100644 index 9159cf02..00000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !plan9,!go1.7 - -package ctxhttp - -import ( - "net" - "net/http" - "net/http/httptest" - "sync" - "testing" - "time" - - "golang.org/x/net/context" -) - -// golang.org/issue/14065 -func TestClosesResponseBodyOnCancel(t *testing.T) { - defer func() { testHookContextDoneBeforeHeaders = nop }() - defer func() { testHookDoReturned = nop }() - defer func() { testHookDidBodyClose = nop }() - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) - defer ts.Close() - - ctx, cancel := context.WithCancel(context.Background()) - - // closed when Do enters select case <-ctx.Done() - enteredDonePath := make(chan struct{}) - - testHookContextDoneBeforeHeaders = func() { - close(enteredDonePath) - } - - testHookDoReturned = func() { - // We now have the result (the Flush'd headers) at least, - // so we can cancel the request. - cancel() - - // But block the client.Do goroutine from sending - // until Do enters into the <-ctx.Done() path, since - // otherwise if both channels are readable, select - // picks a random one. - <-enteredDonePath - } - - sawBodyClose := make(chan struct{}) - testHookDidBodyClose = func() { close(sawBodyClose) } - - tr := &http.Transport{} - defer tr.CloseIdleConnections() - c := &http.Client{Transport: tr} - req, _ := http.NewRequest("GET", ts.URL, nil) - _, doErr := Do(ctx, c, req) - - select { - case <-sawBodyClose: - case <-time.After(5 * time.Second): - t.Fatal("timeout waiting for body to close") - } - - if doErr != ctx.Err() { - t.Errorf("Do error = %v; want %v", doErr, ctx.Err()) - } -} - -type noteCloseConn struct { - net.Conn - onceClose sync.Once - closefn func() -} - -func (c *noteCloseConn) Close() error { - c.onceClose.Do(c.closefn) - return c.Conn.Close() -} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go deleted file mode 100644 index 1e415518..00000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !plan9 - -package ctxhttp - -import ( - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - "time" - - "golang.org/x/net/context" -) - -const ( - requestDuration = 100 * time.Millisecond - requestBody = "ok" -) - -func okHandler(w http.ResponseWriter, r *http.Request) { - time.Sleep(requestDuration) - io.WriteString(w, requestBody) -} - -func TestNoTimeout(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(okHandler)) - defer ts.Close() - - ctx := context.Background() - res, err := Get(ctx, nil, ts.URL) - if err != nil { - t.Fatal(err) - } - defer res.Body.Close() - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if string(slurp) != requestBody { - t.Errorf("body = %q; want %q", slurp, requestBody) - } -} - -func TestCancelBeforeHeaders(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - - blockServer := make(chan struct{}) - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - cancel() - <-blockServer - io.WriteString(w, requestBody) - })) - defer ts.Close() - defer close(blockServer) - - res, err := Get(ctx, nil, ts.URL) - if err == nil { - res.Body.Close() - t.Fatal("Get returned unexpected nil error") - } - if err != context.Canceled { - t.Errorf("err = %v; want %v", err, context.Canceled) - } -} - -func TestCancelAfterHangingRequest(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - w.(http.Flusher).Flush() - <-w.(http.CloseNotifier).CloseNotify() - })) - defer ts.Close() - - ctx, cancel := context.WithCancel(context.Background()) - resp, err := Get(ctx, nil, ts.URL) - if err != nil { - t.Fatalf("unexpected error in Get: %v", err) - } - - // Cancel befer reading the body. - // Reading Request.Body should fail, since the request was - // canceled before anything was written. - cancel() - - done := make(chan struct{}) - - go func() { - b, err := ioutil.ReadAll(resp.Body) - if len(b) != 0 || err == nil { - t.Errorf(`Read got (%q, %v); want ("", error)`, b, err) - } - close(done) - }() - - select { - case <-time.After(1 * time.Second): - t.Errorf("Test timed out") - case <-done: - } -} diff --git a/vendor/golang.org/x/net/context/withtimeout_test.go b/vendor/golang.org/x/net/context/withtimeout_test.go deleted file mode 100644 index e6f56691..00000000 --- a/vendor/golang.org/x/net/context/withtimeout_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context_test - -import ( - "fmt" - "time" - - "golang.org/x/net/context" -) - -// This example passes a context with a timeout to tell a blocking function that -// it should abandon its work after the timeout elapses. -func ExampleWithTimeout() { - // Pass a context with a timeout to tell a blocking function that it - // should abandon its work after the timeout elapses. 
- ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) - defer cancel() - - select { - case <-time.After(1 * time.Second): - fmt.Println("overslept") - case <-ctx.Done(): - fmt.Println(ctx.Err()) // prints "context deadline exceeded" - } - - // Output: - // context deadline exceeded -} diff --git a/vendor/golang.org/x/net/internal/socks/BUILD.bazel b/vendor/golang.org/x/net/internal/socks/BUILD.bazel deleted file mode 100644 index c247366a..00000000 --- a/vendor/golang.org/x/net/internal/socks/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "client.go", - "socks.go", - ], - importpath = "golang.org/x/net/internal/socks", - visibility = ["//vendor/golang.org/x/net:__subpackages__"], -) - -go_test( - name = "go_default_xtest", - srcs = ["dial_test.go"], - importpath = "golang.org/x/net/internal/socks_test", - deps = [ - ":go_default_library", - "//vendor/golang.org/x/net/internal/sockstest:go_default_library", - ], -) diff --git a/vendor/golang.org/x/net/internal/socks/dial_test.go b/vendor/golang.org/x/net/internal/socks/dial_test.go deleted file mode 100644 index 3a7a31bd..00000000 --- a/vendor/golang.org/x/net/internal/socks/dial_test.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package socks_test - -import ( - "context" - "io" - "math/rand" - "net" - "os" - "testing" - "time" - - "golang.org/x/net/internal/socks" - "golang.org/x/net/internal/sockstest" -) - -func TestDial(t *testing.T) { - t.Run("Connect", func(t *testing.T) { - ss, err := sockstest.NewServer(sockstest.NoAuthRequired, sockstest.NoProxyRequired) - if err != nil { - t.Fatal(err) - } - defer ss.Close() - d := socks.NewDialer(ss.Addr().Network(), ss.Addr().String()) - d.AuthMethods = []socks.AuthMethod{ - socks.AuthMethodNotRequired, - socks.AuthMethodUsernamePassword, - } - d.Authenticate = (&socks.UsernamePassword{ - Username: "username", - Password: "password", - }).Authenticate - c, err := d.DialContext(context.Background(), ss.TargetAddr().Network(), ss.TargetAddr().String()) - if err != nil { - t.Fatal(err) - } - c.(*socks.Conn).BoundAddr() - c.Close() - }) - t.Run("ConnectWithConn", func(t *testing.T) { - ss, err := sockstest.NewServer(sockstest.NoAuthRequired, sockstest.NoProxyRequired) - if err != nil { - t.Fatal(err) - } - defer ss.Close() - c, err := net.Dial(ss.Addr().Network(), ss.Addr().String()) - if err != nil { - t.Fatal(err) - } - defer c.Close() - d := socks.NewDialer(ss.Addr().Network(), ss.Addr().String()) - d.AuthMethods = []socks.AuthMethod{ - socks.AuthMethodNotRequired, - socks.AuthMethodUsernamePassword, - } - d.Authenticate = (&socks.UsernamePassword{ - Username: "username", - Password: "password", - }).Authenticate - a, err := d.DialWithConn(context.Background(), c, ss.TargetAddr().Network(), ss.TargetAddr().String()) - if err != nil { - t.Fatal(err) - } - if _, ok := a.(*socks.Addr); !ok { - t.Fatalf("got %+v; want socks.Addr", a) - } - }) - t.Run("Cancel", func(t *testing.T) { - ss, err := sockstest.NewServer(sockstest.NoAuthRequired, blackholeCmdFunc) - if err != nil { - t.Fatal(err) - } - defer ss.Close() - d := socks.NewDialer(ss.Addr().Network(), ss.Addr().String()) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - dialErr := make(chan error) - go func() { - c, err := 
d.DialContext(ctx, ss.TargetAddr().Network(), ss.TargetAddr().String()) - if err == nil { - c.Close() - } - dialErr <- err - }() - time.Sleep(100 * time.Millisecond) - cancel() - err = <-dialErr - if perr, nerr := parseDialError(err); perr != context.Canceled && nerr == nil { - t.Fatalf("got %v; want context.Canceled or equivalent", err) - } - }) - t.Run("Deadline", func(t *testing.T) { - ss, err := sockstest.NewServer(sockstest.NoAuthRequired, blackholeCmdFunc) - if err != nil { - t.Fatal(err) - } - defer ss.Close() - d := socks.NewDialer(ss.Addr().Network(), ss.Addr().String()) - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(100*time.Millisecond)) - defer cancel() - c, err := d.DialContext(ctx, ss.TargetAddr().Network(), ss.TargetAddr().String()) - if err == nil { - c.Close() - } - if perr, nerr := parseDialError(err); perr != context.DeadlineExceeded && nerr == nil { - t.Fatalf("got %v; want context.DeadlineExceeded or equivalent", err) - } - }) - t.Run("WithRogueServer", func(t *testing.T) { - ss, err := sockstest.NewServer(sockstest.NoAuthRequired, rogueCmdFunc) - if err != nil { - t.Fatal(err) - } - defer ss.Close() - d := socks.NewDialer(ss.Addr().Network(), ss.Addr().String()) - for i := 0; i < 2*len(rogueCmdList); i++ { - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(100*time.Millisecond)) - defer cancel() - c, err := d.DialContext(ctx, ss.TargetAddr().Network(), ss.TargetAddr().String()) - if err == nil { - t.Log(c.(*socks.Conn).BoundAddr()) - c.Close() - t.Error("should fail") - } - } - }) -} - -func blackholeCmdFunc(rw io.ReadWriter, b []byte) error { - if _, err := sockstest.ParseCmdRequest(b); err != nil { - return err - } - var bb [1]byte - for { - if _, err := rw.Read(bb[:]); err != nil { - return err - } - } -} - -func rogueCmdFunc(rw io.ReadWriter, b []byte) error { - if _, err := sockstest.ParseCmdRequest(b); err != nil { - return err - } - rw.Write(rogueCmdList[rand.Intn(len(rogueCmdList))]) - return nil -} - -var rogueCmdList = [][]byte{ - {0x05}, - {0x06, 0x00, 0x00, 0x01, 192, 0, 2, 1, 0x17, 0x4b}, - {0x05, 0x00, 0xff, 0x01, 192, 0, 2, 2, 0x17, 0x4b}, - {0x05, 0x00, 0x00, 0x01, 192, 0, 2, 3}, - {0x05, 0x00, 0x00, 0x03, 0x04, 'F', 'Q', 'D', 'N'}, -} - -func parseDialError(err error) (perr, nerr error) { - if e, ok := err.(*net.OpError); ok { - err = e.Err - nerr = e - } - if e, ok := err.(*os.SyscallError); ok { - err = e.Err - } - perr = err - return -} diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go index d93e699b..6929a9fd 100644 --- a/vendor/golang.org/x/net/internal/socks/socks.go +++ b/vendor/golang.org/x/net/internal/socks/socks.go @@ -224,6 +224,7 @@ func (d *Dialer) Dial(network, address string) (net.Conn, error) { return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} } if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil { + c.Close() return nil, err } return c, nil diff --git a/vendor/golang.org/x/net/proxy/BUILD.bazel b/vendor/golang.org/x/net/proxy/BUILD.bazel deleted file mode 100644 index 496f1cf0..00000000 --- a/vendor/golang.org/x/net/proxy/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "direct.go", - "per_host.go", - "proxy.go", - "socks5.go", - ], - importpath = "golang.org/x/net/proxy", - visibility = ["//visibility:public"], - deps = 
["//vendor/golang.org/x/net/internal/socks:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = [ - "per_host_test.go", - "proxy_test.go", - ], - embed = [":go_default_library"], - importpath = "golang.org/x/net/proxy", - deps = ["//vendor/golang.org/x/net/internal/sockstest:go_default_library"], -) diff --git a/vendor/golang.org/x/net/proxy/per_host_test.go b/vendor/golang.org/x/net/proxy/per_host_test.go deleted file mode 100644 index a7d80957..00000000 --- a/vendor/golang.org/x/net/proxy/per_host_test.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proxy - -import ( - "errors" - "net" - "reflect" - "testing" -) - -type recordingProxy struct { - addrs []string -} - -func (r *recordingProxy) Dial(network, addr string) (net.Conn, error) { - r.addrs = append(r.addrs, addr) - return nil, errors.New("recordingProxy") -} - -func TestPerHost(t *testing.T) { - var def, bypass recordingProxy - perHost := NewPerHost(&def, &bypass) - perHost.AddFromString("localhost,*.zone,127.0.0.1,10.0.0.1/8,1000::/16") - - expectedDef := []string{ - "example.com:123", - "1.2.3.4:123", - "[1001::]:123", - } - expectedBypass := []string{ - "localhost:123", - "zone:123", - "foo.zone:123", - "127.0.0.1:123", - "10.1.2.3:123", - "[1000::]:123", - } - - for _, addr := range expectedDef { - perHost.Dial("tcp", addr) - } - for _, addr := range expectedBypass { - perHost.Dial("tcp", addr) - } - - if !reflect.DeepEqual(expectedDef, def.addrs) { - t.Errorf("Hosts which went to the default proxy didn't match. Got %v, want %v", def.addrs, expectedDef) - } - if !reflect.DeepEqual(expectedBypass, bypass.addrs) { - t.Errorf("Hosts which went to the bypass proxy didn't match. Got %v, want %v", bypass.addrs, expectedBypass) - } -} diff --git a/vendor/golang.org/x/net/proxy/proxy_test.go b/vendor/golang.org/x/net/proxy/proxy_test.go deleted file mode 100644 index 0be1b422..00000000 --- a/vendor/golang.org/x/net/proxy/proxy_test.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package proxy - -import ( - "bytes" - "fmt" - "net/url" - "os" - "strings" - "testing" - - "golang.org/x/net/internal/sockstest" -) - -type proxyFromEnvTest struct { - allProxyEnv string - noProxyEnv string - wantTypeOf Dialer -} - -func (t proxyFromEnvTest) String() string { - var buf bytes.Buffer - space := func() { - if buf.Len() > 0 { - buf.WriteByte(' ') - } - } - if t.allProxyEnv != "" { - fmt.Fprintf(&buf, "all_proxy=%q", t.allProxyEnv) - } - if t.noProxyEnv != "" { - space() - fmt.Fprintf(&buf, "no_proxy=%q", t.noProxyEnv) - } - return strings.TrimSpace(buf.String()) -} - -func TestFromEnvironment(t *testing.T) { - ResetProxyEnv() - - type dummyDialer struct { - direct - } - - RegisterDialerType("irc", func(_ *url.URL, _ Dialer) (Dialer, error) { - return dummyDialer{}, nil - }) - - proxyFromEnvTests := []proxyFromEnvTest{ - {allProxyEnv: "127.0.0.1:8080", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, - {allProxyEnv: "ftp://example.com:8000", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, - {allProxyEnv: "socks5://example.com:8080", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: &PerHost{}}, - {allProxyEnv: "irc://example.com:8000", wantTypeOf: dummyDialer{}}, - {noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, - {wantTypeOf: direct{}}, - } - - for _, tt := range proxyFromEnvTests { - os.Setenv("ALL_PROXY", tt.allProxyEnv) - os.Setenv("NO_PROXY", tt.noProxyEnv) - ResetCachedEnvironment() - - d := FromEnvironment() - if got, want := fmt.Sprintf("%T", d), fmt.Sprintf("%T", tt.wantTypeOf); got != want { - t.Errorf("%v: got type = %T, want %T", tt, d, tt.wantTypeOf) - } - } -} - -func TestFromURL(t *testing.T) { - ss, err := sockstest.NewServer(sockstest.NoAuthRequired, sockstest.NoProxyRequired) - if err != nil { - t.Fatal(err) - } - defer ss.Close() - url, err := url.Parse("socks5://user:password@" + ss.Addr().String()) - if err != nil { - t.Fatal(err) - } - proxy, err := FromURL(url, nil) - if err != nil { - t.Fatal(err) - } - c, err := proxy.Dial("tcp", "fqdn.doesnotexist:5963") - if err != nil { - t.Fatal(err) - } - c.Close() -} - -func TestSOCKS5(t *testing.T) { - ss, err := sockstest.NewServer(sockstest.NoAuthRequired, sockstest.NoProxyRequired) - if err != nil { - t.Fatal(err) - } - defer ss.Close() - proxy, err := SOCKS5("tcp", ss.Addr().String(), nil, nil) - if err != nil { - t.Fatal(err) - } - c, err := proxy.Dial("tcp", ss.TargetAddr().String()) - if err != nil { - t.Fatal(err) - } - c.Close() -} - -func ResetProxyEnv() { - for _, env := range []*envOnce{allProxyEnv, noProxyEnv} { - for _, v := range env.names { - os.Setenv(v, "") - } - } - ResetCachedEnvironment() -} - -func ResetCachedEnvironment() { - allProxyEnv.reset() - noProxyEnv.reset() -} diff --git a/vendor/golang.org/x/sys/.gitattributes b/vendor/golang.org/x/sys/.gitattributes deleted file mode 100644 index d2f212e5..00000000 --- a/vendor/golang.org/x/sys/.gitattributes +++ /dev/null @@ -1,10 +0,0 @@ -# Treat all files in this repo as binary, with no git magic updating -# line endings. Windows users contributing to Go will need to use a -# modern version of git and editors capable of LF line endings. -# -# We'll prevent accidental CRLF line endings from entering the repo -# via the git-review gofmt checks. 
-# -# See golang.org/issue/9281 - -* -text diff --git a/vendor/golang.org/x/sys/.gitignore b/vendor/golang.org/x/sys/.gitignore deleted file mode 100644 index 8339fd61..00000000 --- a/vendor/golang.org/x/sys/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# Add no patterns to .hgignore except for files generated by the build. -last-change diff --git a/vendor/golang.org/x/sys/CONTRIBUTING.md b/vendor/golang.org/x/sys/CONTRIBUTING.md deleted file mode 100644 index d0485e88..00000000 --- a/vendor/golang.org/x/sys/CONTRIBUTING.md +++ /dev/null @@ -1,26 +0,0 @@ -# Contributing to Go - -Go is an open source project. - -It is the work of hundreds of contributors. We appreciate your help! - -## Filing issues - -When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. -The gophers there will answer or ask you to file an issue if you've tripped over a bug. - -## Contributing code - -Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) -before sending patches. - -Unless otherwise noted, the Go source files are distributed under -the BSD-style license found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/README.md b/vendor/golang.org/x/sys/README.md deleted file mode 100644 index ef6c9e59..00000000 --- a/vendor/golang.org/x/sys/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# sys - -This repository holds supplemental Go packages for low-level interactions with -the operating system. - -## Download/Install - -The easiest way to install is to run `go get -u golang.org/x/sys`. You can -also manually git clone the repository to `$GOPATH/src/golang.org/x/sys`. - -## Report Issues / Send Patches - -This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. - -The main issue tracker for the sys repository is located at -https://github.com/golang/go/issues. Prefix your issue with "x/sys:" in the -subject line, so it is easy to find. 
diff --git a/vendor/golang.org/x/sys/codereview.cfg b/vendor/golang.org/x/sys/codereview.cfg deleted file mode 100644 index 3f8b14b6..00000000 --- a/vendor/golang.org/x/sys/codereview.cfg +++ /dev/null @@ -1 +0,0 @@ -issuerepo: golang/go diff --git a/vendor/golang.org/x/sys/unix/.gitignore b/vendor/golang.org/x/sys/unix/.gitignore deleted file mode 100644 index e3e0fc6f..00000000 --- a/vendor/golang.org/x/sys/unix/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -_obj/ -unix.test diff --git a/vendor/golang.org/x/sys/unix/BUILD.bazel b/vendor/golang.org/x/sys/unix/BUILD.bazel deleted file mode 100644 index 39c6be08..00000000 --- a/vendor/golang.org/x/sys/unix/BUILD.bazel +++ /dev/null @@ -1,529 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = select({ - "@io_bazel_rules_go//go/platform:darwin": [ - "constants.go", - "dev_darwin.go", - "dirent.go", - "env_unix.go", - "fcntl.go", - "pagesize_unix.go", - "race0.go", - "sockcmsg_unix.go", - "str.go", - "syscall.go", - "syscall_bsd.go", - "syscall_darwin.go", - "syscall_unix.go", - "syscall_unix_gc.go", - "timestruct.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "constants.go", - "dev_dragonfly.go", - "dirent.go", - "env_unix.go", - "fcntl.go", - "pagesize_unix.go", - "race0.go", - "sockcmsg_unix.go", - "str.go", - "syscall.go", - "syscall_bsd.go", - "syscall_dragonfly.go", - "syscall_unix.go", - "syscall_unix_gc.go", - "timestruct.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "cap_freebsd.go", - "constants.go", - "dev_freebsd.go", - "dirent.go", - "env_unix.go", - "fcntl.go", - "pagesize_unix.go", - "race0.go", - "sockcmsg_unix.go", - "str.go", - "syscall.go", - "syscall_bsd.go", - "syscall_freebsd.go", - "syscall_unix.go", - "syscall_unix_gc.go", - "timestruct.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "affinity_linux.go", - "bluetooth_linux.go", - "constants.go", - "dev_linux.go", - "dirent.go", - "env_unix.go", - "fcntl.go", - "pagesize_unix.go", - "race0.go", - "sockcmsg_linux.go", - "sockcmsg_unix.go", - "str.go", - "syscall.go", - "syscall_linux.go", - "syscall_linux_gc.go", - "syscall_unix.go", - "syscall_unix_gc.go", - "timestruct.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "dirent.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "constants.go", - "dev_netbsd.go", - "dirent.go", - "env_unix.go", - "fcntl.go", - "pagesize_unix.go", - "race0.go", - "sockcmsg_unix.go", - "str.go", - "syscall.go", - "syscall_bsd.go", - "syscall_netbsd.go", - "syscall_unix.go", - "syscall_unix_gc.go", - "timestruct.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "constants.go", - "dev_openbsd.go", - "dirent.go", - "env_unix.go", - "fcntl.go", - "pagesize_unix.go", - "race0.go", - "sockcmsg_unix.go", - "str.go", - "syscall.go", - "syscall_bsd.go", - "syscall_openbsd.go", - "syscall_unix.go", - "syscall_unix_gc.go", - "timestruct.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "constants.go", - "dirent.go", - "env_unix.go", - "pagesize_unix.go", - "race0.go", - "sockcmsg_unix.go", - "str.go", - "syscall.go", - "syscall_solaris.go", - "syscall_unix.go", - "syscall_unix_gc.go", - "timestruct.go", - ], - "//conditions:default": [], - }) + select({ - "@io_bazel_rules_go//go/platform:386": [ - "endian_little.go", - ], - "@io_bazel_rules_go//go/platform:amd64": [ - "endian_little.go", - ], - "@io_bazel_rules_go//go/platform:amd64p32": [ - "endian_little.go", - ], - "@io_bazel_rules_go//go/platform:arm": [ - 
"endian_little.go", - ], - "@io_bazel_rules_go//go/platform:arm64": [ - "endian_little.go", - ], - "@io_bazel_rules_go//go/platform:mips": [ - "endian_big.go", - ], - "@io_bazel_rules_go//go/platform:mips64": [ - "endian_big.go", - ], - "@io_bazel_rules_go//go/platform:mips64le": [ - "endian_little.go", - ], - "@io_bazel_rules_go//go/platform:mipsle": [ - "endian_little.go", - ], - "@io_bazel_rules_go//go/platform:ppc64": [ - "endian_big.go", - ], - "@io_bazel_rules_go//go/platform:ppc64le": [ - "endian_little.go", - ], - "@io_bazel_rules_go//go/platform:s390x": [ - "endian_big.go", - ], - "//conditions:default": [], - }) + select({ - "@io_bazel_rules_go//go/platform:darwin_386": [ - "asm_darwin_386.s", - "syscall_darwin_386.go", - "zerrors_darwin_386.go", - "zsyscall_darwin_386.go", - "zsysnum_darwin_386.go", - "ztypes_darwin_386.go", - ], - "@io_bazel_rules_go//go/platform:darwin_amd64": [ - "asm_darwin_amd64.s", - "syscall_darwin_amd64.go", - "zerrors_darwin_amd64.go", - "zsyscall_darwin_amd64.go", - "zsysnum_darwin_amd64.go", - "ztypes_darwin_amd64.go", - ], - "@io_bazel_rules_go//go/platform:darwin_arm": [ - "asm_darwin_arm.s", - "syscall_darwin_arm.go", - "zerrors_darwin_arm.go", - "zsyscall_darwin_arm.go", - "zsysnum_darwin_arm.go", - "ztypes_darwin_arm.go", - ], - "@io_bazel_rules_go//go/platform:darwin_arm64": [ - "asm_darwin_arm64.s", - "syscall_darwin_arm64.go", - "zerrors_darwin_arm64.go", - "zsyscall_darwin_arm64.go", - "zsysnum_darwin_arm64.go", - "ztypes_darwin_arm64.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly_amd64": [ - "asm_dragonfly_amd64.s", - "syscall_dragonfly_amd64.go", - "zerrors_dragonfly_amd64.go", - "zsyscall_dragonfly_amd64.go", - "zsysnum_dragonfly_amd64.go", - "ztypes_dragonfly_amd64.go", - ], - "@io_bazel_rules_go//go/platform:freebsd_386": [ - "asm_freebsd_386.s", - "errors_freebsd_386.go", - "syscall_freebsd_386.go", - "zerrors_freebsd_386.go", - "zsyscall_freebsd_386.go", - "zsysnum_freebsd_386.go", - "ztypes_freebsd_386.go", - ], - "@io_bazel_rules_go//go/platform:freebsd_amd64": [ - "asm_freebsd_amd64.s", - "errors_freebsd_amd64.go", - "syscall_freebsd_amd64.go", - "zerrors_freebsd_amd64.go", - "zsyscall_freebsd_amd64.go", - "zsysnum_freebsd_amd64.go", - "ztypes_freebsd_amd64.go", - ], - "@io_bazel_rules_go//go/platform:freebsd_arm": [ - "asm_freebsd_arm.s", - "errors_freebsd_arm.go", - "syscall_freebsd_arm.go", - "zerrors_freebsd_arm.go", - "zsyscall_freebsd_arm.go", - "zsysnum_freebsd_arm.go", - "ztypes_freebsd_arm.go", - ], - "@io_bazel_rules_go//go/platform:linux_386": [ - "asm_linux_386.s", - "fcntl_linux_32bit.go", - "syscall_linux_386.go", - "syscall_linux_gc_386.go", - "zerrors_linux_386.go", - "zptrace386_linux.go", - "zsyscall_linux_386.go", - "zsysnum_linux_386.go", - "ztypes_linux_386.go", - ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ - "asm_linux_amd64.s", - "syscall_linux_amd64.go", - "syscall_linux_amd64_gc.go", - "zerrors_linux_amd64.go", - "zptrace386_linux.go", - "zsyscall_linux_amd64.go", - "zsysnum_linux_amd64.go", - "ztypes_linux_amd64.go", - ], - "@io_bazel_rules_go//go/platform:linux_arm": [ - "asm_linux_arm.s", - "fcntl_linux_32bit.go", - "syscall_linux_arm.go", - "zerrors_linux_arm.go", - "zptracearm_linux.go", - "zsyscall_linux_arm.go", - "zsysnum_linux_arm.go", - "ztypes_linux_arm.go", - ], - "@io_bazel_rules_go//go/platform:linux_arm64": [ - "asm_linux_arm64.s", - "syscall_linux_arm64.go", - "zerrors_linux_arm64.go", - "zptracearm_linux.go", - "zsyscall_linux_arm64.go", - "zsysnum_linux_arm64.go", - 
"ztypes_linux_arm64.go", - ], - "@io_bazel_rules_go//go/platform:linux_mips": [ - "asm_linux_mipsx.s", - "fcntl_linux_32bit.go", - "syscall_linux_mipsx.go", - "zerrors_linux_mips.go", - "zptracemips_linux.go", - "zsyscall_linux_mips.go", - "zsysnum_linux_mips.go", - "ztypes_linux_mips.go", - ], - "@io_bazel_rules_go//go/platform:linux_mips64": [ - "asm_linux_mips64x.s", - "syscall_linux_mips64x.go", - "zerrors_linux_mips64.go", - "zptracemips_linux.go", - "zsyscall_linux_mips64.go", - "zsysnum_linux_mips64.go", - "ztypes_linux_mips64.go", - ], - "@io_bazel_rules_go//go/platform:linux_mips64le": [ - "asm_linux_mips64x.s", - "syscall_linux_mips64x.go", - "zerrors_linux_mips64le.go", - "zptracemipsle_linux.go", - "zsyscall_linux_mips64le.go", - "zsysnum_linux_mips64le.go", - "ztypes_linux_mips64le.go", - ], - "@io_bazel_rules_go//go/platform:linux_mipsle": [ - "asm_linux_mipsx.s", - "fcntl_linux_32bit.go", - "syscall_linux_mipsx.go", - "zerrors_linux_mipsle.go", - "zptracemipsle_linux.go", - "zsyscall_linux_mipsle.go", - "zsysnum_linux_mipsle.go", - "ztypes_linux_mipsle.go", - ], - "@io_bazel_rules_go//go/platform:linux_ppc64": [ - "asm_linux_ppc64x.s", - "syscall_linux_ppc64x.go", - "zerrors_linux_ppc64.go", - "zsyscall_linux_ppc64.go", - "zsysnum_linux_ppc64.go", - "ztypes_linux_ppc64.go", - ], - "@io_bazel_rules_go//go/platform:linux_ppc64le": [ - "asm_linux_ppc64x.s", - "syscall_linux_ppc64x.go", - "zerrors_linux_ppc64le.go", - "zsyscall_linux_ppc64le.go", - "zsysnum_linux_ppc64le.go", - "ztypes_linux_ppc64le.go", - ], - "@io_bazel_rules_go//go/platform:linux_s390x": [ - "asm_linux_s390x.s", - "syscall_linux_s390x.go", - "zerrors_linux_s390x.go", - "zsyscall_linux_s390x.go", - "zsysnum_linux_s390x.go", - "ztypes_linux_s390x.go", - ], - "@io_bazel_rules_go//go/platform:netbsd_386": [ - "asm_netbsd_386.s", - "syscall_netbsd_386.go", - "zerrors_netbsd_386.go", - "zsyscall_netbsd_386.go", - "zsysnum_netbsd_386.go", - "ztypes_netbsd_386.go", - ], - "@io_bazel_rules_go//go/platform:netbsd_amd64": [ - "asm_netbsd_amd64.s", - "syscall_netbsd_amd64.go", - "zerrors_netbsd_amd64.go", - "zsyscall_netbsd_amd64.go", - "zsysnum_netbsd_amd64.go", - "ztypes_netbsd_amd64.go", - ], - "@io_bazel_rules_go//go/platform:netbsd_arm": [ - "asm_netbsd_arm.s", - "syscall_netbsd_arm.go", - "zerrors_netbsd_arm.go", - "zsyscall_netbsd_arm.go", - "zsysnum_netbsd_arm.go", - "ztypes_netbsd_arm.go", - ], - "@io_bazel_rules_go//go/platform:openbsd_386": [ - "asm_openbsd_386.s", - "openbsd_pledge.go", - "syscall_openbsd_386.go", - "zerrors_openbsd_386.go", - "zsyscall_openbsd_386.go", - "zsysctl_openbsd_386.go", - "zsysnum_openbsd_386.go", - "ztypes_openbsd_386.go", - ], - "@io_bazel_rules_go//go/platform:openbsd_amd64": [ - "asm_openbsd_amd64.s", - "openbsd_pledge.go", - "syscall_openbsd_amd64.go", - "zerrors_openbsd_amd64.go", - "zsyscall_openbsd_amd64.go", - "zsysctl_openbsd_amd64.go", - "zsysnum_openbsd_amd64.go", - "ztypes_openbsd_amd64.go", - ], - "@io_bazel_rules_go//go/platform:openbsd_arm": [ - "asm_openbsd_arm.s", - "openbsd_pledge.go", - "syscall_openbsd_arm.go", - "zerrors_openbsd_arm.go", - "zsyscall_openbsd_arm.go", - "zsysctl_openbsd_arm.go", - "zsysnum_openbsd_arm.go", - "ztypes_openbsd_arm.go", - ], - "@io_bazel_rules_go//go/platform:solaris_amd64": [ - "asm_solaris_amd64.s", - "syscall_solaris_amd64.go", - "zerrors_solaris_amd64.go", - "zsyscall_solaris_amd64.go", - "ztypes_solaris_amd64.go", - ], - "//conditions:default": [], - }), - cgo = True, - importpath = "golang.org/x/sys/unix", - visibility = 
["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = select({ - "@io_bazel_rules_go//go/platform:darwin": [ - "export_test.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "export_test.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "export_test.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "export_test.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "export_test.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "export_test.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "export_test.go", - ], - "//conditions:default": [], - }), - embed = [":go_default_library"], - importpath = "golang.org/x/sys/unix", -) - -go_test( - name = "go_default_xtest", - srcs = select({ - "@io_bazel_rules_go//go/platform:darwin": [ - "example_test.go", - "mmap_unix_test.go", - "syscall_bsd_test.go", - "syscall_darwin_test.go", - "syscall_test.go", - "syscall_unix_test.go", - "timestruct_test.go", - "xattr_test.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "example_test.go", - "mmap_unix_test.go", - "syscall_bsd_test.go", - "syscall_test.go", - "syscall_unix_test.go", - "timestruct_test.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "example_test.go", - "mmap_unix_test.go", - "syscall_bsd_test.go", - "syscall_freebsd_test.go", - "syscall_test.go", - "syscall_unix_test.go", - "timestruct_test.go", - "xattr_test.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "creds_test.go", - "dev_linux_test.go", - "example_test.go", - "mmap_unix_test.go", - "syscall_linux_test.go", - "syscall_test.go", - "syscall_unix_test.go", - "timestruct_test.go", - "xattr_test.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "example_test.go", - "mmap_unix_test.go", - "syscall_test.go", - "syscall_unix_test.go", - "timestruct_test.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "example_test.go", - "mmap_unix_test.go", - "openbsd_test.go", - "syscall_bsd_test.go", - "syscall_test.go", - "syscall_unix_test.go", - "timestruct_test.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "example_test.go", - "mmap_unix_test.go", - "syscall_solaris_test.go", - "syscall_test.go", - "syscall_unix_test.go", - "timestruct_test.go", - ], - "//conditions:default": [], - }), - importpath = "golang.org/x/sys/unix_test", - deps = select({ - "@io_bazel_rules_go//go/platform:darwin": [ - ":go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - ":go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - ":go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - ":go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - ":go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - ":go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - ":go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md deleted file mode 100644 index bc6f6031..00000000 --- a/vendor/golang.org/x/sys/unix/README.md +++ /dev/null @@ -1,173 +0,0 @@ -# Building `sys/unix` - -The sys/unix package provides access to the raw system call interface of the -underlying operating system. See: https://godoc.org/golang.org/x/sys/unix - -Porting Go to a new architecture/OS combination or adding syscalls, types, or -constants to an existing architecture/OS pair requires some manual effort; -however, there are tools that automate much of the process. 
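As a hedged illustration of the package surface described above (not taken from the vendored README; Linux assumed, where the Utsname fields are NUL-padded byte arrays), a consumer normally calls the generated wrappers directly:

```
// Minimal consumer sketch (assumption: Linux). Uname and Getpid are existing
// wrappers exported by golang.org/x/sys/unix; nothing here is specific to
// this change.
package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var uts unix.Utsname
	if err := unix.Uname(&uts); err != nil {
		log.Fatalf("uname: %v", err)
	}
	// Trim the NUL padding before printing the kernel name.
	sysname := string(bytes.TrimRight(uts.Sysname[:], "\x00"))
	fmt.Printf("pid %d on %s\n", unix.Getpid(), sysname)
}
```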
- -## Build Systems - -There are currently two ways we generate the necessary files. We are currently -migrating the build system to use containers so the builds are reproducible. -This is being done on an OS-by-OS basis. Please update this documentation as -components of the build system change. - -### Old Build System (currently for `GOOS != "Linux" || GOARCH == "sparc64"`) - -The old build system generates the Go files based on the C header files -present on your system. This means that files -for a given GOOS/GOARCH pair must be generated on a system with that OS and -architecture. This also means that the generated code can differ from system -to system, based on differences in the header files. - -To avoid this, if you are using the old build system, only generate the Go -files on an installation with unmodified header files. It is also important to -keep track of which version of the OS the files were generated from (ex. -Darwin 14 vs Darwin 15). This makes it easier to track the progress of changes -and have each OS upgrade correspond to a single change. - -To build the files for your current OS and architecture, make sure GOOS and -GOARCH are set correctly and run `mkall.sh`. This will generate the files for -your specific system. Running `mkall.sh -n` shows the commands that will be run. - -Requirements: bash, perl, go - -### New Build System (currently for `GOOS == "Linux" && GOARCH != "sparc64"`) - -The new build system uses a Docker container to generate the go files directly -from source checkouts of the kernel and various system libraries. This means -that on any platform that supports Docker, all the files using the new build -system can be generated at once, and generated files will not change based on -what the person running the scripts has installed on their computer. - -The OS specific files for the new build system are located in the `${GOOS}` -directory, and the build is coordinated by the `${GOOS}/mkall.go` program. When -the kernel or system library updates, modify the Dockerfile at -`${GOOS}/Dockerfile` to checkout the new release of the source. - -To build all the files under the new build system, you must be on an amd64/Linux -system and have your GOOS and GOARCH set accordingly. Running `mkall.sh` will -then generate all of the files for all of the GOOS/GOARCH pairs in the new build -system. Running `mkall.sh -n` shows the commands that will be run. - -Requirements: bash, perl, go, docker - -## Component files - -This section describes the various files used in the code generation process. -It also contains instructions on how to modify these files to add a new -architecture/OS or to add additional syscalls, types, or constants. Note that -if you are using the new build system, the scripts cannot be called normally. -They must be called from within the docker container. - -### asm files - -The hand-written assembly file at `asm_${GOOS}_${GOARCH}.s` implements system -call dispatch. There are three entry points: -``` - func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) - func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) - func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) -``` -The first and second are the standard ones; they differ only in how many -arguments can be passed to the kernel. The third is for low-level use by the -ForkExec wrapper. Unlike the first two, it does not call into the scheduler to -let it know that a system call is running. 
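A hedged sketch of driving the first entry point directly (linux/amd64 assumed; SYS_GETPID comes from the generated zsysnum file). Ordinary code would call the generated unix.Getpid wrapper instead, but this shows how the assembly entry points are consumed:

```
// Sketch only: invoke the Syscall entry point with a raw syscall number.
// Assumes linux/amd64; the generated wrappers are the normal interface.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	pid, _, errno := unix.Syscall(unix.SYS_GETPID, 0, 0, 0)
	if errno != 0 {
		fmt.Println("getpid failed:", errno)
		return
	}
	fmt.Println("pid:", pid)
}
```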
- -When porting Go to an new architecture/OS, this file must be implemented for -each GOOS/GOARCH pair. - -### mksysnum - -Mksysnum is a script located at `${GOOS}/mksysnum.pl` (or `mksysnum_${GOOS}.pl` -for the old system). This script takes in a list of header files containing the -syscall number declarations and parses them to produce the corresponding list of -Go numeric constants. See `zsysnum_${GOOS}_${GOARCH}.go` for the generated -constants. - -Adding new syscall numbers is mostly done by running the build on a sufficiently -new installation of the target OS (or updating the source checkouts for the -new build system). However, depending on the OS, you make need to update the -parsing in mksysnum. - -### mksyscall.pl - -The `syscall.go`, `syscall_${GOOS}.go`, `syscall_${GOOS}_${GOARCH}.go` are -hand-written Go files which implement system calls (for unix, the specific OS, -or the specific OS/Architecture pair respectively) that need special handling -and list `//sys` comments giving prototypes for ones that can be generated. - -The mksyscall.pl script takes the `//sys` and `//sysnb` comments and converts -them into syscalls. This requires the name of the prototype in the comment to -match a syscall number in the `zsysnum_${GOOS}_${GOARCH}.go` file. The function -prototype can be exported (capitalized) or not. - -Adding a new syscall often just requires adding a new `//sys` function prototype -with the desired arguments and a capitalized name so it is exported. However, if -you want the interface to the syscall to be different, often one will make an -unexported `//sys` prototype, an then write a custom wrapper in -`syscall_${GOOS}.go`. - -### types files - -For each OS, there is a hand-written Go file at `${GOOS}/types.go` (or -`types_${GOOS}.go` on the old system). This file includes standard C headers and -creates Go type aliases to the corresponding C types. The file is then fed -through godef to get the Go compatible definitions. Finally, the generated code -is fed though mkpost.go to format the code correctly and remove any hidden or -private identifiers. This cleaned-up code is written to -`ztypes_${GOOS}_${GOARCH}.go`. - -The hardest part about preparing this file is figuring out which headers to -include and which symbols need to be `#define`d to get the actual data -structures that pass through to the kernel system calls. Some C libraries -preset alternate versions for binary compatibility and translate them on the -way in and out of system calls, but there is almost always a `#define` that can -get the real ones. -See `types_darwin.go` and `linux/types.go` for examples. - -To add a new type, add in the necessary include statement at the top of the -file (if it is not already there) and add in a type alias line. Note that if -your type is significantly different on different architectures, you may need -some `#if/#elif` macros in your include statements. - -### mkerrors.sh - -This script is used to generate the system's various constants. This doesn't -just include the error numbers and error strings, but also the signal numbers -an a wide variety of miscellaneous constants. The constants come from the list -of include files in the `includes_${uname}` variable. A regex then picks out -the desired `#define` statements, and generates the corresponding Go constants. -The error numbers and strings are generated from `#include `, and the -signal numbers and strings are generated from `#include `. 
All of -these constants are written to `zerrors_${GOOS}_${GOARCH}.go` via a C program, -`_errors.c`, which prints out all the constants. - -To add a constant, add the header that includes it to the appropriate variable. -Then, edit the regex (if necessary) to match the desired constant. Avoid making -the regex too broad to avoid matching unintended constants. - - -## Generated files - -### `zerror_${GOOS}_${GOARCH}.go` - -A file containing all of the system's generated error numbers, error strings, -signal numbers, and constants. Generated by `mkerrors.sh` (see above). - -### `zsyscall_${GOOS}_${GOARCH}.go` - -A file containing all the generated syscalls for a specific GOOS and GOARCH. -Generated by `mksyscall.pl` (see above). - -### `zsysnum_${GOOS}_${GOARCH}.go` - -A list of numeric constants for all the syscall number of the specific GOOS -and GOARCH. Generated by mksysnum (see above). - -### `ztypes_${GOOS}_${GOARCH}.go` - -A file containing Go types for passing into (or returning from) syscalls. -Generated by godefs and the types file (see above). diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go new file mode 100644 index 00000000..951fce4d --- /dev/null +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +// +build go1.9 + +package unix + +import "syscall" + +type Signal = syscall.Signal +type Errno = syscall.Errno +type SysProcAttr = syscall.SysProcAttr diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go index a96f0ebc..3a6ac648 100644 --- a/vendor/golang.org/x/sys/unix/constants.go +++ b/vendor/golang.org/x/sys/unix/constants.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/creds_test.go b/vendor/golang.org/x/sys/unix/creds_test.go deleted file mode 100644 index 1b508319..00000000 --- a/vendor/golang.org/x/sys/unix/creds_test.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package unix_test - -import ( - "bytes" - "go/build" - "net" - "os" - "testing" - - "golang.org/x/sys/unix" -) - -// TestSCMCredentials tests the sending and receiving of credentials -// (PID, UID, GID) in an ancillary message between two UNIX -// sockets. The SO_PASSCRED socket option is enabled on the sending -// socket for this to work. 
-func TestSCMCredentials(t *testing.T) { - socketTypeTests := []struct { - socketType int - dataLen int - }{ - { - unix.SOCK_STREAM, - 1, - }, { - unix.SOCK_DGRAM, - 0, - }, - } - - for _, tt := range socketTypeTests { - if tt.socketType == unix.SOCK_DGRAM && !atLeast1p10() { - t.Log("skipping DGRAM test on pre-1.10") - continue - } - - fds, err := unix.Socketpair(unix.AF_LOCAL, tt.socketType, 0) - if err != nil { - t.Fatalf("Socketpair: %v", err) - } - defer unix.Close(fds[0]) - defer unix.Close(fds[1]) - - err = unix.SetsockoptInt(fds[0], unix.SOL_SOCKET, unix.SO_PASSCRED, 1) - if err != nil { - t.Fatalf("SetsockoptInt: %v", err) - } - - srvFile := os.NewFile(uintptr(fds[0]), "server") - defer srvFile.Close() - srv, err := net.FileConn(srvFile) - if err != nil { - t.Errorf("FileConn: %v", err) - return - } - defer srv.Close() - - cliFile := os.NewFile(uintptr(fds[1]), "client") - defer cliFile.Close() - cli, err := net.FileConn(cliFile) - if err != nil { - t.Errorf("FileConn: %v", err) - return - } - defer cli.Close() - - var ucred unix.Ucred - ucred.Pid = int32(os.Getpid()) - ucred.Uid = uint32(os.Getuid()) - ucred.Gid = uint32(os.Getgid()) - oob := unix.UnixCredentials(&ucred) - - // On SOCK_STREAM, this is internally going to send a dummy byte - n, oobn, err := cli.(*net.UnixConn).WriteMsgUnix(nil, oob, nil) - if err != nil { - t.Fatalf("WriteMsgUnix: %v", err) - } - if n != 0 { - t.Fatalf("WriteMsgUnix n = %d, want 0", n) - } - if oobn != len(oob) { - t.Fatalf("WriteMsgUnix oobn = %d, want %d", oobn, len(oob)) - } - - oob2 := make([]byte, 10*len(oob)) - n, oobn2, flags, _, err := srv.(*net.UnixConn).ReadMsgUnix(nil, oob2) - if err != nil { - t.Fatalf("ReadMsgUnix: %v", err) - } - if flags != 0 { - t.Fatalf("ReadMsgUnix flags = 0x%x, want 0", flags) - } - if n != tt.dataLen { - t.Fatalf("ReadMsgUnix n = %d, want %d", n, tt.dataLen) - } - if oobn2 != oobn { - // without SO_PASSCRED set on the socket, ReadMsgUnix will - // return zero oob bytes - t.Fatalf("ReadMsgUnix oobn = %d, want %d", oobn2, oobn) - } - oob2 = oob2[:oobn2] - if !bytes.Equal(oob, oob2) { - t.Fatal("ReadMsgUnix oob bytes don't match") - } - - scm, err := unix.ParseSocketControlMessage(oob2) - if err != nil { - t.Fatalf("ParseSocketControlMessage: %v", err) - } - newUcred, err := unix.ParseUnixCredentials(&scm[0]) - if err != nil { - t.Fatalf("ParseUnixCredentials: %v", err) - } - if *newUcred != ucred { - t.Fatalf("ParseUnixCredentials = %+v, want %+v", newUcred, ucred) - } - } -} - -// atLeast1p10 reports whether we are running on Go 1.10 or later. -func atLeast1p10() bool { - for _, ver := range build.Default.ReleaseTags { - if ver == "go1.10" { - return true - } - } - return false -} diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go new file mode 100644 index 00000000..5e5fb451 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go @@ -0,0 +1,27 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix +// +build ppc + +// Functions to access/create device major and minor numbers matching the +// encoding used by AIX. + +package unix + +// Major returns the major component of a Linux device number. +func Major(dev uint64) uint32 { + return uint32((dev >> 16) & 0xffff) +} + +// Minor returns the minor component of a Linux device number. 
+func Minor(dev uint64) uint32 { + return uint32(dev & 0xffff) +} + +// Mkdev returns a Linux device number generated from the given major and minor +// components. +func Mkdev(major, minor uint32) uint64 { + return uint64(((major) << 16) | (minor)) +} diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go new file mode 100644 index 00000000..8b401244 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix +// +build ppc64 + +// Functions to access/create device major and minor numbers matching the +// encoding used AIX. + +package unix + +// Major returns the major component of a Linux device number. +func Major(dev uint64) uint32 { + return uint32((dev & 0x3fffffff00000000) >> 32) +} + +// Minor returns the minor component of a Linux device number. +func Minor(dev uint64) uint32 { + return uint32((dev & 0x00000000ffffffff) >> 0) +} + +// Mkdev returns a Linux device number generated from the given major and minor +// components. +func Mkdev(major, minor uint32) uint64 { + var DEVNO64 uint64 + DEVNO64 = 0x8000000000000000 + return ((uint64(major) << 32) | (uint64(minor) & 0x00000000FFFFFFFF) | DEVNO64) +} diff --git a/vendor/golang.org/x/sys/unix/dev_linux_test.go b/vendor/golang.org/x/sys/unix/dev_linux_test.go deleted file mode 100644 index 51645289..00000000 --- a/vendor/golang.org/x/sys/unix/dev_linux_test.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package unix_test - -import ( - "fmt" - "testing" - - "golang.org/x/sys/unix" -) - -func TestDevices(t *testing.T) { - testCases := []struct { - path string - major uint32 - minor uint32 - }{ - // well known major/minor numbers according to - // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/admin-guide/devices.txt - {"/dev/null", 1, 3}, - {"/dev/zero", 1, 5}, - {"/dev/random", 1, 8}, - {"/dev/full", 1, 7}, - {"/dev/urandom", 1, 9}, - {"/dev/tty", 5, 0}, - } - for _, tc := range testCases { - t.Run(fmt.Sprintf("%s %v:%v", tc.path, tc.major, tc.minor), func(t *testing.T) { - var stat unix.Stat_t - err := unix.Stat(tc.path, &stat) - if err != nil { - if err == unix.EACCES { - t.Skip("no permission to stat device, skipping test") - } - t.Errorf("failed to stat device: %v", err) - return - } - - dev := uint64(stat.Rdev) - if unix.Major(dev) != tc.major { - t.Errorf("for %s Major(%#x) == %d, want %d", tc.path, dev, unix.Major(dev), tc.major) - } - if unix.Minor(dev) != tc.minor { - t.Errorf("for %s Minor(%#x) == %d, want %d", tc.path, dev, unix.Minor(dev), tc.minor) - } - if unix.Mkdev(tc.major, tc.minor) != dev { - t.Errorf("for %s Mkdev(%d, %d) == %#x, want %#x", tc.path, tc.major, tc.minor, unix.Mkdev(tc.major, tc.minor), dev) - } - }) - - } -} diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go index 95fd3531..4407c505 100644 --- a/vendor/golang.org/x/sys/unix/dirent.go +++ b/vendor/golang.org/x/sys/unix/dirent.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
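The AIX helpers added above follow the same shape as the existing Linux Major/Minor/Mkdev functions. A usage sketch in the spirit of the dev_linux_test.go removed in this change (Linux assumed):

```
// Sketch (assumes Linux): split a device number from Stat_t.Rdev and check
// that Mkdev round-trips it, mirroring the removed dev_linux_test.go.
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var st unix.Stat_t
	if err := unix.Stat("/dev/null", &st); err != nil {
		log.Fatalf("stat /dev/null: %v", err)
	}
	dev := uint64(st.Rdev)
	maj, min := unix.Major(dev), unix.Minor(dev)
	fmt.Printf("/dev/null is %d:%d\n", maj, min) // conventionally 1:3
	if unix.Mkdev(maj, min) != dev {
		log.Fatal("Mkdev did not round-trip the device number")
	}
}
```

Mkdev is expected to reproduce the original Rdev value exactly, which is what the removed test asserted for the well-known character devices.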
-// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go index 706b3cd1..84178b0a 100644 --- a/vendor/golang.org/x/sys/unix/env_unix.go +++ b/vendor/golang.org/x/sys/unix/env_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris // Unix environment variables. diff --git a/vendor/golang.org/x/sys/unix/example_test.go b/vendor/golang.org/x/sys/unix/example_test.go deleted file mode 100644 index 10619afd..00000000 --- a/vendor/golang.org/x/sys/unix/example_test.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package unix_test - -import ( - "log" - "os" - - "golang.org/x/sys/unix" -) - -func ExampleExec() { - err := unix.Exec("/bin/ls", []string{"ls", "-al"}, os.Environ()) - log.Fatal(err) -} diff --git a/vendor/golang.org/x/sys/unix/export_test.go b/vendor/golang.org/x/sys/unix/export_test.go deleted file mode 100644 index e8024690..00000000 --- a/vendor/golang.org/x/sys/unix/export_test.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package unix - -var Itoa = itoa diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go index 0c58c7e1..9379ba9c 100644 --- a/vendor/golang.org/x/sys/unix/fcntl.go +++ b/vendor/golang.org/x/sys/unix/fcntl.go @@ -14,7 +14,11 @@ var fcntl64Syscall uintptr = SYS_FCNTL // FcntlInt performs a fcntl syscall on fd with the provided command and argument. func FcntlInt(fd uintptr, cmd, arg int) (int, error) { - valptr, _, err := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(arg)) + valptr, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(arg)) + var err error + if errno != 0 { + err = errno + } return int(valptr), err } diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index 50062e3c..cd6f5a61 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // +build gccgo +// +build !aix package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c index 46523ced..c44730c5 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // +build gccgo +// +build !aix #include #include diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go new file mode 100644 index 00000000..f121a8d6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ioctl.go @@ -0,0 +1,30 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix + +import "runtime" + +// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. +// +// To change fd's window size, the req argument should be TIOCSWINSZ. +func IoctlSetWinsize(fd int, req uint, value *Winsize) error { + // TODO: if we get the chance, remove the req parameter and + // hardcode TIOCSWINSZ. + err := ioctlSetWinsize(fd, req, value) + runtime.KeepAlive(value) + return err +} + +// IoctlSetTermios performs an ioctl on fd with a *Termios. +// +// The req value will usually be TCSETA or TIOCSETA. +func IoctlSetTermios(fd int, req uint, value *Termios) error { + // TODO: if we get the chance, remove the req parameter. + err := ioctlSetTermios(fd, req, value) + runtime.KeepAlive(value) + return err +} diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh deleted file mode 100755 index 1715122b..00000000 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ /dev/null @@ -1,188 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2009 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# This script runs or (given -n) prints suggested commands to generate files for -# the Architecture/OS specified by the GOARCH and GOOS environment variables. -# See README.md for more information about how the build system works. - -GOOSARCH="${GOOS}_${GOARCH}" - -# defaults -mksyscall="./mksyscall.pl" -mkerrors="./mkerrors.sh" -zerrors="zerrors_$GOOSARCH.go" -mksysctl="" -zsysctl="zsysctl_$GOOSARCH.go" -mksysnum= -mktypes= -run="sh" -cmd="" - -case "$1" in --syscalls) - for i in zsyscall*go - do - # Run the command line that appears in the first line - # of the generated file to regenerate it. 
- sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i - rm _$i - done - exit 0 - ;; --n) - run="cat" - cmd="echo" - shift -esac - -case "$#" in -0) - ;; -*) - echo 'usage: mkall.sh [-n]' 1>&2 - exit 2 -esac - -if [[ "$GOOS" = "linux" ]] && [[ "$GOARCH" != "sparc64" ]]; then - # Use then new build system - # Files generated through docker (use $cmd so you can Ctl-C the build or run) - $cmd docker build --tag generate:$GOOS $GOOS - $cmd docker run --interactive --tty --volume $(dirname "$(readlink -f "$0")"):/build generate:$GOOS - exit -fi - -GOOSARCH_in=syscall_$GOOSARCH.go -case "$GOOSARCH" in -_* | *_ | _) - echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2 - exit 1 - ;; -darwin_386) - mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32" - mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -darwin_amd64) - mkerrors="$mkerrors -m64" - mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -darwin_arm) - mkerrors="$mkerrors" - mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -darwin_arm64) - mkerrors="$mkerrors -m64" - mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -dragonfly_amd64) - mkerrors="$mkerrors -m64" - mksyscall="./mksyscall.pl -dragonfly" - mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -freebsd_386) - mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32" - mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -freebsd_amd64) - mkerrors="$mkerrors -m64" - mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -freebsd_arm) - mkerrors="$mkerrors" - mksyscall="./mksyscall.pl -l32 -arm" - mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl" - # Let the type of C char be signed for making the bare syscall - # API consistent across platforms. 
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" - ;; -linux_sparc64) - GOOSARCH_in=syscall_linux_sparc64.go - unistd_h=/usr/include/sparc64-linux-gnu/asm/unistd.h - mkerrors="$mkerrors -m64" - mksysnum="./mksysnum_linux.pl $unistd_h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -netbsd_386) - mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32 -netbsd" - mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -netbsd_amd64) - mkerrors="$mkerrors -m64" - mksyscall="./mksyscall.pl -netbsd" - mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -netbsd_arm) - mkerrors="$mkerrors" - mksyscall="./mksyscall.pl -l32 -netbsd -arm" - mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl" - # Let the type of C char be signed for making the bare syscall - # API consistent across platforms. - mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" - ;; -openbsd_386) - mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32 -openbsd" - mksysctl="./mksysctl_openbsd.pl" - mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -openbsd_amd64) - mkerrors="$mkerrors -m64" - mksyscall="./mksyscall.pl -openbsd" - mksysctl="./mksysctl_openbsd.pl" - mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -openbsd_arm) - mkerrors="$mkerrors" - mksyscall="./mksyscall.pl -l32 -openbsd -arm" - mksysctl="./mksysctl_openbsd.pl" - mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" - # Let the type of C char be signed for making the bare syscall - # API consistent across platforms. - mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" - ;; -solaris_amd64) - mksyscall="./mksyscall_solaris.pl" - mkerrors="$mkerrors -m64" - mksysnum= - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -*) - echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2 - exit 1 - ;; -esac - -( - if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi - case "$GOOS" in - *) - syscall_goos="syscall_$GOOS.go" - case "$GOOS" in - darwin | dragonfly | freebsd | netbsd | openbsd) - syscall_goos="syscall_bsd.go $syscall_goos" - ;; - esac - if [ -n "$mksyscall" ]; then echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi - ;; - esac - if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi - if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi - if [ -n "$mktypes" ]; then - echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go"; - fi -) | $run diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh deleted file mode 100755 index ddc50a01..00000000 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ /dev/null @@ -1,603 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2009 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. 
- -# Generate Go code listing errors and other #defined constant -# values (ENAMETOOLONG etc.), by asking the preprocessor -# about the definitions. - -unset LANG -export LC_ALL=C -export LC_CTYPE=C - -if test -z "$GOARCH" -o -z "$GOOS"; then - echo 1>&2 "GOARCH or GOOS not defined in environment" - exit 1 -fi - -# Check that we are using the new build system if we should -if [[ "$GOOS" = "linux" ]] && [[ "$GOARCH" != "sparc64" ]]; then - if [[ "$GOLANG_SYS_BUILD" != "docker" ]]; then - echo 1>&2 "In the new build system, mkerrors should not be called directly." - echo 1>&2 "See README.md" - exit 1 - fi -fi - -CC=${CC:-cc} - -if [[ "$GOOS" = "solaris" ]]; then - # Assumes GNU versions of utilities in PATH. - export PATH=/usr/gnu/bin:$PATH -fi - -uname=$(uname) - -includes_Darwin=' -#define _DARWIN_C_SOURCE -#define KERNEL -#define _DARWIN_USE_64_BIT_INODE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -' - -includes_DragonFly=' -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -' - -includes_FreeBSD=' -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if __FreeBSD__ >= 10 -#define IFT_CARP 0xf8 // IFT_CARP is deprecated in FreeBSD 10 -#undef SIOCAIFADDR -#define SIOCAIFADDR _IOW(105, 26, struct oifaliasreq) // ifaliasreq contains if_data -#undef SIOCSIFPHYADDR -#define SIOCSIFPHYADDR _IOW(105, 70, struct oifaliasreq) // ifaliasreq contains if_data -#endif -' - -includes_Linux=' -#define _LARGEFILE_SOURCE -#define _LARGEFILE64_SOURCE -#ifndef __LP64__ -#define _FILE_OFFSET_BITS 64 -#endif -#define _GNU_SOURCE - -// is broken on powerpc64, as it fails to include definitions of -// these structures. We just include them copied from . 
-#if defined(__powerpc__) -struct sgttyb { - char sg_ispeed; - char sg_ospeed; - char sg_erase; - char sg_kill; - short sg_flags; -}; - -struct tchars { - char t_intrc; - char t_quitc; - char t_startc; - char t_stopc; - char t_eofc; - char t_brkc; -}; - -struct ltchars { - char t_suspc; - char t_dsuspc; - char t_rprntc; - char t_flushc; - char t_werasc; - char t_lnextc; -}; -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifndef MSG_FASTOPEN -#define MSG_FASTOPEN 0x20000000 -#endif - -#ifndef PTRACE_GETREGS -#define PTRACE_GETREGS 0xc -#endif - -#ifndef PTRACE_SETREGS -#define PTRACE_SETREGS 0xd -#endif - -#ifndef SOL_NETLINK -#define SOL_NETLINK 270 -#endif - -#ifdef SOL_BLUETOOTH -// SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h -// but it is already in bluetooth_linux.go -#undef SOL_BLUETOOTH -#endif - -// Certain constants are missing from the fs/crypto UAPI -#define FS_KEY_DESC_PREFIX "fscrypt:" -#define FS_KEY_DESC_PREFIX_SIZE 8 -#define FS_MAX_KEY_SIZE 64 -' - -includes_NetBSD=' -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -// Needed since refers to it... -#define schedppq 1 -' - -includes_OpenBSD=' -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -// We keep some constants not supported in OpenBSD 5.5 and beyond for -// the promise of compatibility. -#define EMUL_ENABLED 0x1 -#define EMUL_NATIVE 0x2 -#define IPV6_FAITH 0x1d -#define IPV6_OPTIONS 0x1 -#define IPV6_RTHDR_STRICT 0x1 -#define IPV6_SOCKOPT_RESERVED1 0x3 -#define SIOCGIFGENERIC 0xc020693a -#define SIOCSIFGENERIC 0x80206939 -#define WALTSIG 0x4 -' - -includes_SunOS=' -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -' - - -includes=' -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -' -ccflags="$@" - -# Write go tool cgo -godefs input. -( - echo package unix - echo - echo '/*' - indirect="includes_$(uname)" - echo "${!indirect} $includes" - echo '*/' - echo 'import "C"' - echo 'import "syscall"' - echo - echo 'const (' - - # The gcc command line prints all the #defines - # it encounters while processing the input - echo "${!indirect} $includes" | $CC -x c - -E -dM $ccflags | - awk ' - $1 != "#define" || $2 ~ /\(/ || $3 == "" {next} - - $2 ~ /^E([ABCD]X|[BIS]P|[SD]I|S|FL)$/ {next} # 386 registers - $2 ~ /^(SIGEV_|SIGSTKSZ|SIGRT(MIN|MAX))/ {next} - $2 ~ /^(SCM_SRCRT)$/ {next} - $2 ~ /^(MAP_FAILED)$/ {next} - $2 ~ /^ELF_.*$/ {next}# contains ELF_ARCH, etc. 
- - $2 ~ /^EXTATTR_NAMESPACE_NAMES/ || - $2 ~ /^EXTATTR_NAMESPACE_[A-Z]+_STRING/ {next} - - $2 !~ /^ETH_/ && - $2 !~ /^EPROC_/ && - $2 !~ /^EQUIV_/ && - $2 !~ /^EXPR_/ && - $2 ~ /^E[A-Z0-9_]+$/ || - $2 ~ /^B[0-9_]+$/ || - $2 ~ /^(OLD|NEW)DEV$/ || - $2 == "BOTHER" || - $2 ~ /^CI?BAUD(EX)?$/ || - $2 == "IBSHIFT" || - $2 ~ /^V[A-Z0-9]+$/ || - $2 ~ /^CS[A-Z0-9]/ || - $2 ~ /^I(SIG|CANON|CRNL|UCLC|EXTEN|MAXBEL|STRIP|UTF8)$/ || - $2 ~ /^IGN/ || - $2 ~ /^IX(ON|ANY|OFF)$/ || - $2 ~ /^IN(LCR|PCK)$/ || - $2 !~ "X86_CR3_PCID_NOFLUSH" && - $2 ~ /(^FLU?SH)|(FLU?SH$)/ || - $2 ~ /^C(LOCAL|READ|MSPAR|RTSCTS)$/ || - $2 == "BRKINT" || - $2 == "HUPCL" || - $2 == "PENDIN" || - $2 == "TOSTOP" || - $2 == "XCASE" || - $2 == "ALTWERASE" || - $2 == "NOKERNINFO" || - $2 ~ /^PAR/ || - $2 ~ /^SIG[^_]/ || - $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ || - $2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ || - $2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ || - $2 ~ /^O?XTABS$/ || - $2 ~ /^TC[IO](ON|OFF)$/ || - $2 ~ /^IN_/ || - $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|T?PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ || - $2 ~ /^TP_STATUS_/ || - $2 ~ /^FALLOC_/ || - $2 == "ICMPV6_FILTER" || - $2 == "SOMAXCONN" || - $2 == "NAME_MAX" || - $2 == "IFNAMSIZ" || - $2 ~ /^CTL_(HW|KERN|MAXNAME|NET|QUERY)$/ || - $2 ~ /^KERN_(HOSTNAME|OS(RELEASE|TYPE)|VERSION)$/ || - $2 ~ /^HW_MACHINE$/ || - $2 ~ /^SYSCTL_VERS/ || - $2 ~ /^(MS|MNT|UMOUNT)_/ || - $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || - $2 ~ /^(O|F|E?FD|NAME|S|PTRACE|PT)_/ || - $2 ~ /^LINUX_REBOOT_CMD_/ || - $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ || - $2 !~ "NLA_TYPE_MASK" && - $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P)_/ || - $2 ~ /^SIOC/ || - $2 ~ /^TIOC/ || - $2 ~ /^TCGET/ || - $2 ~ /^TCSET/ || - $2 ~ /^TC(FLSH|SBRKP?|XONC)$/ || - $2 !~ "RTF_BITS" && - $2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ || - $2 ~ /^BIOC/ || - $2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ || - $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ || - $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || - $2 ~ /^CLONE_[A-Z_]+/ || - $2 !~ /^(BPF_TIMEVAL)$/ && - $2 ~ /^(BPF|DLT)_/ || - $2 ~ /^CLOCK_/ || - $2 ~ /^CAN_/ || - $2 ~ /^CAP_/ || - $2 ~ /^ALG_/ || - $2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE|IOC_(GET|SET)_ENCRYPTION)/ || - $2 ~ /^GRND_/ || - $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || - $2 ~ /^KEYCTL_/ || - $2 ~ /^PERF_EVENT_IOC_/ || - $2 ~ /^SECCOMP_MODE_/ || - $2 ~ /^SPLICE_/ || - $2 !~ /^AUDIT_RECORD_MAGIC/ && - $2 ~ /^[A-Z0-9_]+_MAGIC2?$/ || - $2 ~ /^(VM|VMADDR)_/ || - $2 ~ /^IOCTL_VM_SOCKETS_/ || - $2 ~ /^(TASKSTATS|TS)_/ || - $2 ~ /^CGROUPSTATS_/ || - $2 ~ /^GENL_/ || - $2 ~ /^STATX_/ || - $2 ~ /^UTIME_/ || - $2 ~ /^XATTR_(CREATE|REPLACE|NO(DEFAULT|FOLLOW|SECURITY)|SHOWCOMPRESSION)/ || - $2 ~ /^ATTR_(BIT_MAP_COUNT|(CMN|VOL|FILE)_)/ || - $2 ~ /^FSOPT_/ || - $2 ~ /^WDIOC_/ || - $2 ~ /^NFN/ || - $2 ~ /^(HDIO|WIN|SMART)_/ || - $2 !~ "WMESGLEN" && - $2 ~ /^W[A-Z0-9]+$/ || - $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} - $2 ~ /^__WCOREFLAG$/ {next} - $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} - - {next} - ' | sort - - echo ')' -) >_const.go - -# Pull out the error names for later. -errors=$( - echo '#include ' | $CC -x c - -E -dM $ccflags | - awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print $2 }' | - sort -) - -# Pull out the signal names for later. 
-signals=$( - echo '#include ' | $CC -x c - -E -dM $ccflags | - awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | - egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' | - sort -) - -# Again, writing regexps to a file. -echo '#include ' | $CC -x c - -E -dM $ccflags | - awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print "^\t" $2 "[ \t]*=" }' | - sort >_error.grep -echo '#include ' | $CC -x c - -E -dM $ccflags | - awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' | - sort >_signal.grep - -echo '// mkerrors.sh' "$@" -echo '// Code generated by the command above; see README.md. DO NOT EDIT.' -echo -echo "// +build ${GOARCH},${GOOS}" -echo -go tool cgo -godefs -- "$@" _const.go >_error.out -cat _error.out | grep -vf _error.grep | grep -vf _signal.grep -echo -echo '// Errors' -echo 'const (' -cat _error.out | grep -f _error.grep | sed 's/=\(.*\)/= syscall.Errno(\1)/' -echo ')' - -echo -echo '// Signals' -echo 'const (' -cat _error.out | grep -f _signal.grep | sed 's/=\(.*\)/= syscall.Signal(\1)/' -echo ')' - -# Run C program to print error and syscall strings. -( - echo -E " -#include -#include -#include -#include -#include -#include - -#define nelem(x) (sizeof(x)/sizeof((x)[0])) - -enum { A = 'A', Z = 'Z', a = 'a', z = 'z' }; // avoid need for single quotes below - -struct tuple { - int num; - const char *name; -}; - -struct tuple errors[] = { -" - for i in $errors - do - echo -E ' {'$i', "'$i'" },' - done - - echo -E " -}; - -struct tuple signals[] = { -" - for i in $signals - do - echo -E ' {'$i', "'$i'" },' - done - - # Use -E because on some systems bash builtin interprets \n itself. - echo -E ' -}; - -static int -tuplecmp(const void *a, const void *b) -{ - return ((struct tuple *)a)->num - ((struct tuple *)b)->num; -} - -int -main(void) -{ - int i, e; - char buf[1024], *p; - - printf("\n\n// Error table\n"); - printf("var errorList = [...]struct {\n"); - printf("\tnum syscall.Errno\n"); - printf("\tname string\n"); - printf("\tdesc string\n"); - printf("} {\n"); - qsort(errors, nelem(errors), sizeof errors[0], tuplecmp); - for(i=0; i 0 && errors[i-1].num == e) - continue; - strcpy(buf, strerror(e)); - // lowercase first letter: Bad -> bad, but STREAM -> STREAM. - if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) - buf[0] += a - A; - printf("\t{ %d, \"%s\", \"%s\" },\n", e, errors[i].name, buf); - } - printf("}\n\n"); - - printf("\n\n// Signal table\n"); - printf("var signalList = [...]struct {\n"); - printf("\tnum syscall.Signal\n"); - printf("\tname string\n"); - printf("\tdesc string\n"); - printf("} {\n"); - qsort(signals, nelem(signals), sizeof signals[0], tuplecmp); - for(i=0; i 0 && signals[i-1].num == e) - continue; - strcpy(buf, strsignal(e)); - // lowercase first letter: Bad -> bad, but STREAM -> STREAM. - if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) - buf[0] += a - A; - // cut trailing : number. - p = strrchr(buf, ":"[0]); - if(p) - *p = '\0'; - printf("\t{ %d, \"%s\", \"%s\" },\n", e, signals[i].name, buf); - } - printf("}\n\n"); - - return 0; -} - -' -) >_errors.c - -$CC $ccflags -o _errors _errors.c && $GORUN ./_errors && rm -f _errors.c _errors _const.go _error.grep _signal.grep _error.out diff --git a/vendor/golang.org/x/sys/unix/mksyscall.pl b/vendor/golang.org/x/sys/unix/mksyscall.pl deleted file mode 100755 index 1f6b926f..00000000 --- a/vendor/golang.org/x/sys/unix/mksyscall.pl +++ /dev/null @@ -1,341 +0,0 @@ -#!/usr/bin/env perl -# Copyright 2009 The Go Authors. 
All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# This program reads a file containing function prototypes -# (like syscall_darwin.go) and generates system call bodies. -# The prototypes are marked by lines beginning with "//sys" -# and read like func declarations if //sys is replaced by func, but: -# * The parameter lists must give a name for each argument. -# This includes return parameters. -# * The parameter lists must give a type for each argument: -# the (x, y, z int) shorthand is not allowed. -# * If the return parameter is an error number, it must be named errno. - -# A line beginning with //sysnb is like //sys, except that the -# goroutine will not be suspended during the execution of the system -# call. This must only be used for system calls which can never -# block, as otherwise the system call could cause all goroutines to -# hang. - -use strict; - -my $cmdline = "mksyscall.pl " . join(' ', @ARGV); -my $errors = 0; -my $_32bit = ""; -my $plan9 = 0; -my $openbsd = 0; -my $netbsd = 0; -my $dragonfly = 0; -my $arm = 0; # 64-bit value should use (even, odd)-pair -my $tags = ""; # build tags - -if($ARGV[0] eq "-b32") { - $_32bit = "big-endian"; - shift; -} elsif($ARGV[0] eq "-l32") { - $_32bit = "little-endian"; - shift; -} -if($ARGV[0] eq "-plan9") { - $plan9 = 1; - shift; -} -if($ARGV[0] eq "-openbsd") { - $openbsd = 1; - shift; -} -if($ARGV[0] eq "-netbsd") { - $netbsd = 1; - shift; -} -if($ARGV[0] eq "-dragonfly") { - $dragonfly = 1; - shift; -} -if($ARGV[0] eq "-arm") { - $arm = 1; - shift; -} -if($ARGV[0] eq "-tags") { - shift; - $tags = $ARGV[0]; - shift; -} - -if($ARGV[0] =~ /^-/) { - print STDERR "usage: mksyscall.pl [-b32 | -l32] [-tags x,y] [file ...]\n"; - exit 1; -} - -# Check that we are using the new build system if we should -if($ENV{'GOOS'} eq "linux" && $ENV{'GOARCH'} ne "sparc64") { - if($ENV{'GOLANG_SYS_BUILD'} ne "docker") { - print STDERR "In the new build system, mksyscall should not be called directly.\n"; - print STDERR "See README.md\n"; - exit 1; - } -} - - -sub parseparamlist($) { - my ($list) = @_; - $list =~ s/^\s*//; - $list =~ s/\s*$//; - if($list eq "") { - return (); - } - return split(/\s*,\s*/, $list); -} - -sub parseparam($) { - my ($p) = @_; - if($p !~ /^(\S*) (\S*)$/) { - print STDERR "$ARGV:$.: malformed parameter: $p\n"; - $errors = 1; - return ("xx", "int"); - } - return ($1, $2); -} - -my $text = ""; -while(<>) { - chomp; - s/\s+/ /g; - s/^\s+//; - s/\s+$//; - my $nonblock = /^\/\/sysnb /; - next if !/^\/\/sys / && !$nonblock; - - # Line must be of the form - # func Open(path string, mode int, perm int) (fd int, errno error) - # Split into name, in params, out params. - if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$/) { - print STDERR "$ARGV:$.: malformed //sys declaration\n"; - $errors = 1; - next; - } - my ($func, $in, $out, $sysname) = ($2, $3, $4, $5); - - # Split argument lists on comma. - my @in = parseparamlist($in); - my @out = parseparamlist($out); - - # Try in vain to keep people from editing this file. - # The theory is that they jump into the middle of the file - # without reading the header. - $text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"; - - # Go function header. - my $out_decl = @out ? 
sprintf(" (%s)", join(', ', @out)) : ""; - $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out_decl; - - # Check if err return available - my $errvar = ""; - foreach my $p (@out) { - my ($name, $type) = parseparam($p); - if($type eq "error") { - $errvar = $name; - last; - } - } - - # Prepare arguments to Syscall. - my @args = (); - my $n = 0; - foreach my $p (@in) { - my ($name, $type) = parseparam($p); - if($type =~ /^\*/) { - push @args, "uintptr(unsafe.Pointer($name))"; - } elsif($type eq "string" && $errvar ne "") { - $text .= "\tvar _p$n *byte\n"; - $text .= "\t_p$n, $errvar = BytePtrFromString($name)\n"; - $text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))"; - $n++; - } elsif($type eq "string") { - print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n"; - $text .= "\tvar _p$n *byte\n"; - $text .= "\t_p$n, _ = BytePtrFromString($name)\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))"; - $n++; - } elsif($type =~ /^\[\](.*)/) { - # Convert slice into pointer, length. - # Have to be careful not to take address of &a[0] if len == 0: - # pass dummy pointer in that case. - # Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). - $text .= "\tvar _p$n unsafe.Pointer\n"; - $text .= "\tif len($name) > 0 {\n\t\t_p$n = unsafe.Pointer(\&${name}[0])\n\t}"; - $text .= " else {\n\t\t_p$n = unsafe.Pointer(&_zero)\n\t}"; - $text .= "\n"; - push @args, "uintptr(_p$n)", "uintptr(len($name))"; - $n++; - } elsif($type eq "int64" && ($openbsd || $netbsd)) { - push @args, "0"; - if($_32bit eq "big-endian") { - push @args, "uintptr($name>>32)", "uintptr($name)"; - } elsif($_32bit eq "little-endian") { - push @args, "uintptr($name)", "uintptr($name>>32)"; - } else { - push @args, "uintptr($name)"; - } - } elsif($type eq "int64" && $dragonfly) { - if ($func !~ /^extp(read|write)/i) { - push @args, "0"; - } - if($_32bit eq "big-endian") { - push @args, "uintptr($name>>32)", "uintptr($name)"; - } elsif($_32bit eq "little-endian") { - push @args, "uintptr($name)", "uintptr($name>>32)"; - } else { - push @args, "uintptr($name)"; - } - } elsif($type eq "int64" && $_32bit ne "") { - if(@args % 2 && $arm) { - # arm abi specifies 64-bit argument uses - # (even, odd) pair - push @args, "0" - } - if($_32bit eq "big-endian") { - push @args, "uintptr($name>>32)", "uintptr($name)"; - } else { - push @args, "uintptr($name)", "uintptr($name>>32)"; - } - } else { - push @args, "uintptr($name)"; - } - } - - # Determine which form to use; pad args with zeros. - my $asm = "Syscall"; - if ($nonblock) { - if ($errvar eq "" && $ENV{'GOOS'} eq "linux") { - $asm = "RawSyscallNoError"; - } else { - $asm = "RawSyscall"; - } - } else { - if ($errvar eq "" && $ENV{'GOOS'} eq "linux") { - $asm = "SyscallNoError"; - } - } - if(@args <= 3) { - while(@args < 3) { - push @args, "0"; - } - } elsif(@args <= 6) { - $asm .= "6"; - while(@args < 6) { - push @args, "0"; - } - } elsif(@args <= 9) { - $asm .= "9"; - while(@args < 9) { - push @args, "0"; - } - } else { - print STDERR "$ARGV:$.: too many arguments to system call\n"; - } - - # System call number. - if($sysname eq "") { - $sysname = "SYS_$func"; - $sysname =~ s/([a-z])([A-Z])/${1}_$2/g; # turn FooBar into Foo_Bar - $sysname =~ y/a-z/A-Z/; - } - - # Actual call. - my $args = join(', ', @args); - my $call = "$asm($sysname, $args)"; - - # Assign return values. 
- my $body = ""; - my @ret = ("_", "_", "_"); - my $do_errno = 0; - for(my $i=0; $i<@out; $i++) { - my $p = $out[$i]; - my ($name, $type) = parseparam($p); - my $reg = ""; - if($name eq "err" && !$plan9) { - $reg = "e1"; - $ret[2] = $reg; - $do_errno = 1; - } elsif($name eq "err" && $plan9) { - $ret[0] = "r0"; - $ret[2] = "e1"; - next; - } else { - $reg = sprintf("r%d", $i); - $ret[$i] = $reg; - } - if($type eq "bool") { - $reg = "$reg != 0"; - } - if($type eq "int64" && $_32bit ne "") { - # 64-bit number in r1:r0 or r0:r1. - if($i+2 > @out) { - print STDERR "$ARGV:$.: not enough registers for int64 return\n"; - } - if($_32bit eq "big-endian") { - $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1); - } else { - $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i); - } - $ret[$i] = sprintf("r%d", $i); - $ret[$i+1] = sprintf("r%d", $i+1); - } - if($reg ne "e1" || $plan9) { - $body .= "\t$name = $type($reg)\n"; - } - } - if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") { - $text .= "\t$call\n"; - } else { - if ($errvar eq "" && $ENV{'GOOS'} eq "linux") { - # raw syscall without error on Linux, see golang.org/issue/22924 - $text .= "\t$ret[0], $ret[1] := $call\n"; - } else { - $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n"; - } - } - $text .= $body; - - if ($plan9 && $ret[2] eq "e1") { - $text .= "\tif int32(r0) == -1 {\n"; - $text .= "\t\terr = e1\n"; - $text .= "\t}\n"; - } elsif ($do_errno) { - $text .= "\tif e1 != 0 {\n"; - $text .= "\t\terr = errnoErr(e1)\n"; - $text .= "\t}\n"; - } - $text .= "\treturn\n"; - $text .= "}\n\n"; -} - -chomp $text; -chomp $text; - -if($errors) { - exit 1; -} - -print <) { - chomp; - s/\s+/ /g; - s/^\s+//; - s/\s+$//; - $package = $1 if !$package && /^package (\S+)$/; - my $nonblock = /^\/\/sysnb /; - next if !/^\/\/sys / && !$nonblock; - - # Line must be of the form - # func Open(path string, mode int, perm int) (fd int, err error) - # Split into name, in params, out params. - if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$/) { - print STDERR "$ARGV:$.: malformed //sys declaration\n"; - $errors = 1; - next; - } - my ($nb, $func, $in, $out, $modname, $sysname) = ($1, $2, $3, $4, $5, $6); - - # Split argument lists on comma. - my @in = parseparamlist($in); - my @out = parseparamlist($out); - - # So file name. - if($modname eq "") { - $modname = "libc"; - } - - # System call name. - if($sysname eq "") { - $sysname = "$func"; - } - - # System call pointer variable name. - my $sysvarname = "proc$sysname"; - - my $strconvfunc = "BytePtrFromString"; - my $strconvtype = "*byte"; - - $sysname =~ y/A-Z/a-z/; # All libc functions are lowercase. - - # Runtime import of function to allow cross-platform builds. - $dynimports .= "//go:cgo_import_dynamic libc_${sysname} ${sysname} \"$modname.so\"\n"; - # Link symbol to proc address variable. - $linknames .= "//go:linkname ${sysvarname} libc_${sysname}\n"; - # Library proc address variable. - push @vars, $sysvarname; - - # Go function header. - $out = join(', ', @out); - if($out ne "") { - $out = " ($out)"; - } - if($text ne "") { - $text .= "\n" - } - $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out; - - # Check if err return available - my $errvar = ""; - foreach my $p (@out) { - my ($name, $type) = parseparam($p); - if($type eq "error") { - $errvar = $name; - last; - } - } - - # Prepare arguments to Syscall. 
- my @args = (); - my $n = 0; - foreach my $p (@in) { - my ($name, $type) = parseparam($p); - if($type =~ /^\*/) { - push @args, "uintptr(unsafe.Pointer($name))"; - } elsif($type eq "string" && $errvar ne "") { - $text .= "\tvar _p$n $strconvtype\n"; - $text .= "\t_p$n, $errvar = $strconvfunc($name)\n"; - $text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))"; - $n++; - } elsif($type eq "string") { - print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n"; - $text .= "\tvar _p$n $strconvtype\n"; - $text .= "\t_p$n, _ = $strconvfunc($name)\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))"; - $n++; - } elsif($type =~ /^\[\](.*)/) { - # Convert slice into pointer, length. - # Have to be careful not to take address of &a[0] if len == 0: - # pass nil in that case. - $text .= "\tvar _p$n *$1\n"; - $text .= "\tif len($name) > 0 {\n\t\t_p$n = \&$name\[0]\n\t}\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))", "uintptr(len($name))"; - $n++; - } elsif($type eq "int64" && $_32bit ne "") { - if($_32bit eq "big-endian") { - push @args, "uintptr($name >> 32)", "uintptr($name)"; - } else { - push @args, "uintptr($name)", "uintptr($name >> 32)"; - } - } elsif($type eq "bool") { - $text .= "\tvar _p$n uint32\n"; - $text .= "\tif $name {\n\t\t_p$n = 1\n\t} else {\n\t\t_p$n = 0\n\t}\n"; - push @args, "uintptr(_p$n)"; - $n++; - } else { - push @args, "uintptr($name)"; - } - } - my $nargs = @args; - - # Determine which form to use; pad args with zeros. - my $asm = "sysvicall6"; - if ($nonblock) { - $asm = "rawSysvicall6"; - } - if(@args <= 6) { - while(@args < 6) { - push @args, "0"; - } - } else { - print STDERR "$ARGV:$.: too many arguments to system call\n"; - } - - # Actual call. - my $args = join(', ', @args); - my $call = "$asm(uintptr(unsafe.Pointer(&$sysvarname)), $nargs, $args)"; - - # Assign return values. - my $body = ""; - my $failexpr = ""; - my @ret = ("_", "_", "_"); - my @pout= (); - my $do_errno = 0; - for(my $i=0; $i<@out; $i++) { - my $p = $out[$i]; - my ($name, $type) = parseparam($p); - my $reg = ""; - if($name eq "err") { - $reg = "e1"; - $ret[2] = $reg; - $do_errno = 1; - } else { - $reg = sprintf("r%d", $i); - $ret[$i] = $reg; - } - if($type eq "bool") { - $reg = "$reg != 0"; - } - if($type eq "int64" && $_32bit ne "") { - # 64-bit number in r1:r0 or r0:r1. 
- if($i+2 > @out) { - print STDERR "$ARGV:$.: not enough registers for int64 return\n"; - } - if($_32bit eq "big-endian") { - $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1); - } else { - $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i); - } - $ret[$i] = sprintf("r%d", $i); - $ret[$i+1] = sprintf("r%d", $i+1); - } - if($reg ne "e1") { - $body .= "\t$name = $type($reg)\n"; - } - } - if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") { - $text .= "\t$call\n"; - } else { - $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n"; - } - $text .= $body; - - if ($do_errno) { - $text .= "\tif e1 != 0 {\n"; - $text .= "\t\terr = e1\n"; - $text .= "\t}\n"; - } - $text .= "\treturn\n"; - $text .= "}\n"; -} - -if($errors) { - exit 1; -} - -print < "net.inet", - "net.inet.ipproto" => "net.inet", - "net.inet6.ipv6proto" => "net.inet6", - "net.inet6.ipv6" => "net.inet6.ip6", - "net.inet.icmpv6" => "net.inet6.icmp6", - "net.inet6.divert6" => "net.inet6.divert", - "net.inet6.tcp6" => "net.inet.tcp", - "net.inet6.udp6" => "net.inet.udp", - "mpls" => "net.mpls", - "swpenc" => "vm.swapencrypt" -); - -# Node mappings -my %node_map = ( - "net.inet.ip.ifq" => "net.ifq", - "net.inet.pfsync" => "net.pfsync", - "net.mpls.ifq" => "net.ifq" -); - -my $ctlname; -my %mib = (); -my %sysctl = (); -my $node; - -sub debug() { - print STDERR "$_[0]\n" if $debug; -} - -# Walk the MIB and build a sysctl name to OID mapping. -sub build_sysctl() { - my ($node, $name, $oid) = @_; - my %node = %{$node}; - my @oid = @{$oid}; - - foreach my $key (sort keys %node) { - my @node = @{$node{$key}}; - my $nodename = $name.($name ne '' ? '.' : '').$key; - my @nodeoid = (@oid, $node[0]); - if ($node[1] eq 'CTLTYPE_NODE') { - if (exists $node_map{$nodename}) { - $node = \%mib; - $ctlname = $node_map{$nodename}; - foreach my $part (split /\./, $ctlname) { - $node = \%{@{$$node{$part}}[2]}; - } - } else { - $node = $node[2]; - } - &build_sysctl($node, $nodename, \@nodeoid); - } elsif ($node[1] ne '') { - $sysctl{$nodename} = \@nodeoid; - } - } -} - -foreach my $ctl (@ctls) { - $ctls{$ctl} = $ctl; -} - -# Build MIB -foreach my $header (@headers) { - &debug("Processing $header..."); - open HEADER, "/usr/include/$header" || - print STDERR "Failed to open $header\n"; - while (
) { - if ($_ =~ /^#define\s+(CTL_NAMES)\s+{/ || - $_ =~ /^#define\s+(CTL_(.*)_NAMES)\s+{/ || - $_ =~ /^#define\s+((.*)CTL_NAMES)\s+{/) { - if ($1 eq 'CTL_NAMES') { - # Top level. - $node = \%mib; - } else { - # Node. - my $nodename = lc($2); - if ($header =~ /^netinet\//) { - $ctlname = "net.inet.$nodename"; - } elsif ($header =~ /^netinet6\//) { - $ctlname = "net.inet6.$nodename"; - } elsif ($header =~ /^net\//) { - $ctlname = "net.$nodename"; - } else { - $ctlname = "$nodename"; - $ctlname =~ s/^(fs|net|kern)_/$1\./; - } - if (exists $ctl_map{$ctlname}) { - $ctlname = $ctl_map{$ctlname}; - } - if (not exists $ctls{$ctlname}) { - &debug("Ignoring $ctlname..."); - next; - } - - # Walk down from the top of the MIB. - $node = \%mib; - foreach my $part (split /\./, $ctlname) { - if (not exists $$node{$part}) { - &debug("Missing node $part"); - $$node{$part} = [ 0, '', {} ]; - } - $node = \%{@{$$node{$part}}[2]}; - } - } - - # Populate current node with entries. - my $i = -1; - while (defined($_) && $_ !~ /^}/) { - $_ =
; - $i++ if $_ =~ /{.*}/; - next if $_ !~ /{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}/; - $$node{$1} = [ $i, $2, {} ]; - } - } - } - close HEADER; -} - -&build_sysctl(\%mib, "", []); - -print <){ - if(/^#define\s+SYS_(\w+)\s+([0-9]+)/){ - my $name = $1; - my $num = $2; - $name =~ y/a-z/A-Z/; - print " SYS_$name = $num;" - } -} - -print <){ - if(/^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$/){ - my $num = $1; - my $proto = $2; - my $name = "SYS_$3"; - $name =~ y/a-z/A-Z/; - - # There are multiple entries for enosys and nosys, so comment them out. - if($name =~ /^SYS_E?NOSYS$/){ - $name = "// $name"; - } - if($name eq 'SYS_SYS_EXIT'){ - $name = 'SYS_EXIT'; - } - - print " $name = $num; // $proto\n"; - } -} - -print <){ - if(/^([0-9]+)\s+\S+\s+STD\s+({ \S+\s+(\w+).*)$/){ - my $num = $1; - my $proto = $2; - my $name = "SYS_$3"; - $name =~ y/a-z/A-Z/; - - # There are multiple entries for enosys and nosys, so comment them out. - if($name =~ /^SYS_E?NOSYS$/){ - $name = "// $name"; - } - if($name eq 'SYS_SYS_EXIT'){ - $name = 'SYS_EXIT'; - } - - print " $name = $num; // $proto\n"; - } -} - -print <){ - if($line =~ /^(.*)\\$/) { - # Handle continuation - $line = $1; - $_ =~ s/^\s+//; - $line .= $_; - } else { - # New line - $line = $_; - } - next if $line =~ /\\$/; - if($line =~ /^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$/) { - my $num = $1; - my $proto = $6; - my $compat = $8; - my $name = "$7_$9"; - - $name = "$7_$11" if $11 ne ''; - $name =~ y/a-z/A-Z/; - - if($compat eq '' || $compat eq '13' || $compat eq '30' || $compat eq '50') { - print " $name = $num; // $proto\n"; - } - } -} - -print <){ - if(/^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$/){ - my $num = $1; - my $proto = $3; - my $name = $4; - $name =~ y/a-z/A-Z/; - - # There are multiple entries for enosys and nosys, so comment them out. - if($name =~ /^SYS_E?NOSYS$/){ - $name = "// $name"; - } - if($name eq 'SYS_SYS_EXIT'){ - $name = 'SYS_EXIT'; - } - - print " $name = $num; // $proto\n"; - } -} - -print < 6.2, pass execpromises to the syscall. + if maj > 6 || (maj == 6 && min > 2) { + exptr, err := syscall.BytePtrFromString(execpromises) + if err != nil { return err } - pathsUnsafe = unsafe.Pointer(&pathsPtr[0]) + expr = unsafe.Pointer(exptr) } - _, _, e := syscall.Syscall(_SYS_PLEDGE, uintptr(promisesUnsafe), uintptr(pathsUnsafe), 0) + + _, _, e := syscall.Syscall(_SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) if e != 0 { return e } + return nil } + +// majmin returns major and minor version number for an OpenBSD system. +func majmin() (major int, minor int, err error) { + var v Utsname + err = Uname(&v) + if err != nil { + return + } + + major, err = strconv.Atoi(string(v.Release[0])) + if err != nil { + err = errors.New("cannot parse major version number returned by uname") + return + } + + minor, err = strconv.Atoi(string(v.Release[2])) + if err != nil { + err = errors.New("cannot parse minor version number returned by uname") + return + } + + return +} diff --git a/vendor/golang.org/x/sys/unix/openbsd_test.go b/vendor/golang.org/x/sys/unix/openbsd_test.go deleted file mode 100644 index 734d7658..00000000 --- a/vendor/golang.org/x/sys/unix/openbsd_test.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
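Reviewer note on the pledge(2) hunk above: on OpenBSD releases newer than 6.2 (detected by majmin parsing the uname release string) the execpromises string is now converted and passed as the second syscall argument; on older releases a nil pointer is kept. A minimal usage sketch, assuming the wrapper's new two-string signature that the hunk suggests:

    package main

    import (
        "log"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Restrict this process to stdio plus read-only filesystem access.
        // The second string covers exec'd children; as the hunk above suggests,
        // it is only forwarded to the kernel on OpenBSD releases after 6.2.
        if err := unix.Pledge("stdio rpath", "stdio rpath"); err != nil {
            log.Fatal(err)
        }
    }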
- -// +build openbsd - -// This, on the face of it, bizarre testing mechanism is necessary because -// the only reliable way to gauge whether or not a pledge(2) call has succeeded -// is that the program has been killed as a result of breaking its pledge. - -package unix_test - -import ( - "flag" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "testing" - - "golang.org/x/sys/unix" -) - -type testProc struct { - fn func() // should always exit instead of returning - cleanup func() error // for instance, delete coredumps from testing pledge - success bool // whether zero-exit means success or failure -} - -var ( - testProcs = map[string]testProc{} - procName = "" -) - -const ( - optName = "sys-unix-internal-procname" -) - -func init() { - flag.StringVar(&procName, optName, "", "internal use only") -} - -// testCmd generates a proper command that, when executed, runs the test -// corresponding to the given key. -func testCmd(procName string) (*exec.Cmd, error) { - exe, err := filepath.Abs(os.Args[0]) - if err != nil { - return nil, err - } - cmd := exec.Command(exe, "-"+optName+"="+procName) - cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr - return cmd, nil -} - -// ExitsCorrectly is a comprehensive, one-line-of-use wrapper for testing -// a testProc with a key. -func ExitsCorrectly(procName string, t *testing.T) { - s := testProcs[procName] - c, err := testCmd(procName) - defer func() { - if s.cleanup() != nil { - t.Fatalf("Failed to run cleanup for %s", procName) - } - }() - if err != nil { - t.Fatalf("Failed to construct command for %s", procName) - } - if (c.Run() == nil) != s.success { - result := "succeed" - if !s.success { - result = "fail" - } - t.Fatalf("Process did not %s when it was supposed to", result) - } -} - -func TestMain(m *testing.M) { - flag.Parse() - if procName != "" { - testProcs[procName].fn() - } - os.Exit(m.Run()) -} - -// For example, add a test for pledge. -func init() { - testProcs["pledge"] = testProc{ - func() { - fmt.Println(unix.Pledge("", nil)) - os.Exit(0) - }, - func() error { - files, err := ioutil.ReadDir(".") - if err != nil { - return err - } - for _, file := range files { - if filepath.Ext(file.Name()) == ".core" { - if err := os.Remove(file.Name()); err != nil { - return err - } - } - } - return nil - }, - false, - } -} - -func TestPledge(t *testing.T) { - ExitsCorrectly("pledge", t) -} diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go index 83c85e01..bc2f3629 100644 --- a/vendor/golang.org/x/sys/unix/pagesize_unix.go +++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris // For Unix, get the pagesize from the runtime. diff --git a/vendor/golang.org/x/sys/unix/race0.go b/vendor/golang.org/x/sys/unix/race0.go index dd082043..ad026678 100644 --- a/vendor/golang.org/x/sys/unix/race0.go +++ b/vendor/golang.org/x/sys/unix/race0.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly +// +build aix darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index bb756ece..f153c067 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris // Socket control messages diff --git a/vendor/golang.org/x/sys/unix/str.go b/vendor/golang.org/x/sys/unix/str.go index 35ed6643..17fb6986 100644 --- a/vendor/golang.org/x/sys/unix/str.go +++ b/vendor/golang.org/x/sys/unix/str.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index ef35fce8..0d4b1d7a 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris // Package unix contains an interface to the low-level operating system // primitives. OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go new file mode 100644 index 00000000..df1f9ea3 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -0,0 +1,564 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix + +// Aix system calls. +// This file is compiled as ordinary Go code, +// but it is also input to mksyscall, +// which parses the //sys lines and generates system call stubs. +// Note that sometimes we use a lowercase //sys name and +// wrap it in our own nicer implementation. 
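Reviewer note on the syscall_aix.go header above: the file is both ordinary Go code and mksyscall input, so //sys comment lines declare signatures whose stubs are generated elsewhere, while lowercase //sys names get hand-written exported wrappers. Purely as an illustration of that convention (the declaration and names below are hypothetical, written as if inside package unix, and not part of the real file):

    //sys	renameFile(old string, new string) (err error) = rename

    // RenameFile is the kind of hand-written wrapper the header describes:
    // argument checks live in Go, the raw call goes through the generated stub.
    func RenameFile(old, new string) error {
        if old == "" || new == "" {
            return EINVAL
        }
        return renameFile(old, new)
    }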
+ +package unix + +import ( + "syscall" + "unsafe" +) + +/* + * Wrapped + */ + +//sys utimes(path string, times *[2]Timeval) (err error) +func Utimes(path string, tv []Timeval) error { + if len(tv) != 2 { + return EINVAL + } + return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +//sys utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) +func UtimesNano(path string, ts []Timespec) error { + if len(ts) != 2 { + return EINVAL + } + return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error { + if ts == nil { + return utimensat(dirfd, path, nil, flags) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) +} + +func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Family = AF_INET + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil +} + +func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Family = AF_INET6 + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + sa.raw.Scope_id = sa.ZoneId + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil +} + +func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { + name := sa.Name + n := len(name) + if n > len(sa.raw.Path) { + return nil, 0, EINVAL + } + if n == len(sa.raw.Path) && name[0] != '@' { + return nil, 0, EINVAL + } + sa.raw.Family = AF_UNIX + for i := 0; i < n; i++ { + sa.raw.Path[i] = uint8(name[i]) + } + // length is family (uint16), name, NUL. + sl := _Socklen(2) + if n > 0 { + sl += _Socklen(n) + 1 + } + if sa.raw.Path[0] == '@' { + sa.raw.Path[0] = 0 + // Don't count trailing NUL for abstract address. + sl-- + } + + return unsafe.Pointer(&sa.raw), sl, nil +} + +func Getsockname(fd int) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if err = getsockname(fd, &rsa, &len); err != nil { + return + } + return anyToSockaddr(fd, &rsa) +} + +//sys getcwd(buf []byte) (err error) + +const ImplementsGetwd = true + +func Getwd() (ret string, err error) { + for len := uint64(4096); ; len *= 2 { + b := make([]byte, len) + err := getcwd(b) + if err == nil { + i := 0 + for b[i] != 0 { + i++ + } + return string(b[0:i]), nil + } + if err != ERANGE { + return "", err + } + } +} + +func Getcwd(buf []byte) (n int, err error) { + err = getcwd(buf) + if err == nil { + i := 0 + for buf[i] != 0 { + i++ + } + n = i + 1 + } + return +} + +func Getgroups() (gids []int, err error) { + n, err := getgroups(0, nil) + if err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + + // Sanity check group count. Max is 16 on BSD. 
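Reviewer note on the SockaddrInet4/SockaddrInet6 packing above: the port is stored in network byte order by splitting it into two bytes. A small worked example of the same split (illustrative only):

    package main

    import "fmt"

    func main() {
        // Port 8080 is 0x1F90; the wrappers above store it big-endian.
        port := 8080
        var p [2]byte
        p[0] = byte(port >> 8) // 0x1f
        p[1] = byte(port)      // 0x90
        // anyToSockaddr reverses the split the same way:
        fmt.Println(int(p[0])<<8 + int(p[1])) // prints 8080
    }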
+ if n < 0 || n > 1000 { + return nil, EINVAL + } + + a := make([]_Gid_t, n) + n, err = getgroups(n, &a[0]) + if err != nil { + return nil, err + } + gids = make([]int, n) + for i, v := range a[0:n] { + gids[i] = int(v) + } + return +} + +func Setgroups(gids []int) (err error) { + if len(gids) == 0 { + return setgroups(0, nil) + } + + a := make([]_Gid_t, len(gids)) + for i, v := range gids { + a[i] = _Gid_t(v) + } + return setgroups(len(a), &a[0]) +} + +/* + * Socket + */ + +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) + +func Accept(fd int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept(fd, &rsa, &len) + if nfd == -1 { + return + } + sa, err = anyToSockaddr(fd, &rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + // Recvmsg not implemented on AIX + sa := new(SockaddrUnix) + return -1, -1, -1, sa, ENOSYS +} + +func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { + _, err = SendmsgN(fd, p, oob, to, flags) + return +} + +func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { + // SendmsgN not implemented on AIX + return -1, ENOSYS +} + +func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { + switch rsa.Addr.Family { + + case AF_UNIX: + pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) + sa := new(SockaddrUnix) + + // Some versions of AIX have a bug in getsockname (see IV78655). + // We can't rely on sa.Len being set correctly. + n := SizeofSockaddrUnix - 3 // substract leading Family, Len, terminating NUL. + for i := 0; i < n; i++ { + if pp.Path[i] == 0 { + n = i + break + } + } + + bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + sa.Name = string(bytes) + return sa, nil + + case AF_INET: + pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + + case AF_INET6: + pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + } + return nil, EAFNOSUPPORT +} + +func Gettimeofday(tv *Timeval) (err error) { + err = gettimeofday(tv, nil) + return +} + +// TODO +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + return -1, ENOSYS +} + +//sys getdirent(fd int, buf []byte) (n int, err error) +func ReadDirent(fd int, buf []byte) (n int, err error) { + return getdirent(fd, buf) +} + +//sys wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, err error) +func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + var status _C_int + var r Pid_t + err = ERESTART + // AIX wait4 may return with ERESTART errno, while the processus is still + // active. 
+ for err == ERESTART { + r, err = wait4(Pid_t(pid), &status, options, rusage) + } + wpid = int(r) + if wstatus != nil { + *wstatus = WaitStatus(status) + } + return +} + +/* + * Wait + */ + +type WaitStatus uint32 + +func (w WaitStatus) Stopped() bool { return w&0x40 != 0 } +func (w WaitStatus) StopSignal() syscall.Signal { + if !w.Stopped() { + return -1 + } + return syscall.Signal(w>>8) & 0xFF +} + +func (w WaitStatus) Exited() bool { return w&0xFF == 0 } +func (w WaitStatus) ExitStatus() int { + if !w.Exited() { + return -1 + } + return int((w >> 8) & 0xFF) +} + +func (w WaitStatus) Signaled() bool { return w&0x40 == 0 && w&0xFF != 0 } +func (w WaitStatus) Signal() syscall.Signal { + if !w.Signaled() { + return -1 + } + return syscall.Signal(w>>16) & 0xFF +} + +func (w WaitStatus) Continued() bool { return w&0x01000000 != 0 } + +func (w WaitStatus) CoreDump() bool { return w&0x200 != 0 } + +func (w WaitStatus) TrapCause() int { return -1 } + +//sys ioctl(fd int, req uint, arg uintptr) (err error) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req uint, value int) error { + return ioctl(fd, req, uintptr(value)) +} + +func ioctlSetWinsize(fd int, req uint, value *Winsize) error { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + +func ioctlSetTermios(fd int, req uint, value *Termios) error { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +func IoctlGetInt(fd int, req uint) (int, error) { + var value int + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + +func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { + var value Winsize + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlGetTermios(fd int, req uint) (*Termios, error) { + var value Termios + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +// fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX +// There is no way to create a custom fcntl and to keep //sys fcntl easily, +// Therefore, the programmer must call dup2 instead of fcntl in this case. + +// FcntlInt performs a fcntl syscall on fd with the provided command and argument. +//sys FcntlInt(fd uintptr, cmd int, arg int) (r int,err error) = fcntl + +// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. 
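Reviewer note on the ioctl helpers above: they follow the package's usual pattern of exported typed getters and unexported typed setters. A minimal usage sketch (assuming TIOCGWINSZ is defined for AIX in this package, which is not verified here):

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Query the terminal size of stdout via the typed ioctl getter.
        ws, err := unix.IoctlGetWinsize(int(os.Stdout.Fd()), unix.TIOCGWINSZ)
        if err != nil {
            fmt.Fprintln(os.Stderr, "ioctl:", err)
            return
        }
        fmt.Printf("%d rows x %d cols\n", ws.Row, ws.Col)
    }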
+//sys FcntlFlock(fd uintptr, cmd int, lk *Flock_t) (err error) = fcntl + +func Flock(fd int, how int) (err error) { + return syscall.Flock(fd, how) +} + +/* + * Direct access + */ + +//sys Acct(path string) (err error) +//sys Chdir(path string) (err error) +//sys Chroot(path string) (err error) +//sys Close(fd int) (err error) +//sys Dup(oldfd int) (fd int, err error) +//sys Dup3(oldfd int, newfd int, flags int) (err error) +//sys Exit(code int) +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) +//sys Fallocate(fd int, mode uint32, off int64, len int64) (err error) +//sys Fchdir(fd int) (err error) +//sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) +//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) +//sys fcntl(fd int, cmd int, arg int) (val int, err error) +//sys Fdatasync(fd int) (err error) +//sys Fsync(fd int) (err error) +// readdir_r +//sysnb Getpgid(pid int) (pgid int, err error) + +//sys Getpgrp() (pid int) + +//sysnb Getpid() (pid int) +//sysnb Getppid() (ppid int) +//sys Getpriority(which int, who int) (prio int, err error) +//sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Getsid(pid int) (sid int, err error) +//sysnb Kill(pid int, sig syscall.Signal) (err error) +//sys Klogctl(typ int, buf []byte) (n int, err error) = syslog +//sys Mkdir(dirfd int, path string, mode uint32) (err error) +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) +//sys Mkfifo(path string, mode uint32) (err error) +//sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) +//sys Nanosleep(time *Timespec, leftover *Timespec) (err error) +//sys Open(path string, mode int, perm uint32) (fd int, err error) = open64 +//sys Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) +//sys read(fd int, p []byte) (n int, err error) +//sys Readlink(path string, buf []byte) (n int, err error) +//sys Removexattr(path string, attr string) (err error) +//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) +//sys Setdomainname(p []byte) (err error) +//sys Sethostname(p []byte) (err error) +//sysnb Setpgid(pid int, pgid int) (err error) +//sysnb Setsid() (pid int, err error) +//sysnb Settimeofday(tv *Timeval) (err error) + +//sys Setuid(uid int) (err error) +//sys Setgid(uid int) (err error) + +//sys Setpriority(which int, who int, prio int) (err error) +//sys Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) +//sys Sync() +//sys Tee(rfd int, wfd int, len int, flags int) (n int64, err error) +//sysnb Times(tms *Tms) (ticks uintptr, err error) +//sysnb Umask(mask int) (oldmask int) +//sysnb Uname(buf *Utsname) (err error) +//TODO umount +// //sys Unmount(target string, flags int) (err error) = umount +//sys Unlink(path string) (err error) +//sys Unlinkat(dirfd int, path string, flags int) (err error) +//sys Unshare(flags int) (err error) +//sys Ustat(dev int, ubuf *Ustat_t) (err error) +//sys write(fd int, p []byte) (n int, err error) +//sys readlen(fd int, p *byte, np int) (n int, err error) = read +//sys writelen(fd int, p *byte, np int) (n int, err error) = write + +//sys Dup2(oldfd int, newfd int) (err error) +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = posix_fadvise64 +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys 
Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = fstatat +//sys Fstatfs(fd int, buf *Statfs_t) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (euid int) +//sysnb Getgid() (gid int) +//sysnb Getuid() (uid int) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Listen(s int, n int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) +//sys Pause() (err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) = pread64 +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = pwrite64 +//TODO Select +// //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) +//sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) +//sys Stat(path string, stat *Stat_t) (err error) +//sys Statfs(path string, buf *Statfs_t) (err error) +//sys Truncate(path string, length int64) (err error) + +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) + +//sys munmap(addr uintptr, length uintptr) (err error) + +var mapper = &mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, +} + +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + +//sys Madvise(b []byte, advice int) (err error) +//sys Mprotect(b []byte, prot int) (err error) +//sys Mlock(b []byte) (err error) +//sys Mlockall(flags int) (err error) +//sys Msync(b []byte, flags int) (err error) +//sys Munlock(b []byte) (err error) +//sys Munlockall() (err error) + +//sysnb pipe(p *[2]_C_int) (err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe(&pp) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err 
error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} + +//sys gettimeofday(tv *Timeval, tzp *Timezone) (err error) +//sysnb Time(t *Time_t) (tt Time_t, err error) +//sys Utime(path string, buf *Utimbuf) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go new file mode 100644 index 00000000..c28af1f8 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -0,0 +1,34 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix +// +build ppc + +package unix + +//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = getrlimit64 +//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) = setrlimit64 +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = lseek64 + +//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go new file mode 100644 index 00000000..881cacc6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -0,0 +1,34 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
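Reviewer note on the Poll wrapper completed above: it passes a nil *PollFd when the slice is empty so the raw syscall never dereferences fds[0]. A usage sketch with field names as defined in this package:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Wait up to one second for stdin to become readable.
        fds := []unix.PollFd{{Fd: int32(os.Stdin.Fd()), Events: unix.POLLIN}}
        n, err := unix.Poll(fds, 1000) // timeout in milliseconds
        if err != nil {
            fmt.Fprintln(os.Stderr, "poll:", err)
            return
        }
        fmt.Println("ready descriptors:", n, "revents:", fds[0].Revents)
    }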
+ +// +build aix +// +build ppc64 + +package unix + +//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) +//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = lseek + +//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) = mmap64 + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int64(sec), Usec: int32(usec)} +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 53fb8518..33c8b5f0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -206,7 +206,7 @@ func (sa *SockaddrDatalink) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrDatalink, nil } -func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { +func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_LINK: pp := (*RawSockaddrDatalink)(unsafe.Pointer(rsa)) @@ -286,7 +286,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { Close(nfd) return 0, nil, ECONNABORTED } - sa, err = anyToSockaddr(&rsa) + sa, err = anyToSockaddr(fd, &rsa) if err != nil { Close(nfd) nfd = 0 @@ -306,7 +306,7 @@ func Getsockname(fd int) (sa Sockaddr, err error) { rsa.Addr.Family = AF_UNIX rsa.Addr.Len = SizeofSockaddrUnix } - return anyToSockaddr(&rsa) + return anyToSockaddr(fd, &rsa) } //sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) @@ -356,7 +356,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from recvflags = int(msg.Flags) // source address is only specified if the socket is unconnected if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(&rsa) + from, err = anyToSockaddr(fd, &rsa) } return } diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd_test.go b/vendor/golang.org/x/sys/unix/syscall_bsd_test.go deleted file mode 100644 index 6c4e2aca..00000000 --- a/vendor/golang.org/x/sys/unix/syscall_bsd_test.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin dragonfly freebsd openbsd - -package unix_test - -import ( - "os/exec" - "runtime" - "testing" - "time" - - "golang.org/x/sys/unix" -) - -const MNT_WAIT = 1 -const MNT_NOWAIT = 2 - -func TestGetfsstat(t *testing.T) { - const flags = MNT_NOWAIT // see golang.org/issue/16937 - n, err := unix.Getfsstat(nil, flags) - if err != nil { - t.Fatal(err) - } - - data := make([]unix.Statfs_t, n) - n2, err := unix.Getfsstat(data, flags) - if err != nil { - t.Fatal(err) - } - if n != n2 { - t.Errorf("Getfsstat(nil) = %d, but subsequent Getfsstat(slice) = %d", n, n2) - } - for i, stat := range data { - if stat == (unix.Statfs_t{}) { - t.Errorf("index %v is an empty Statfs_t struct", i) - } - } - if t.Failed() { - for i, stat := range data[:n2] { - t.Logf("data[%v] = %+v", i, stat) - } - mount, err := exec.Command("mount").CombinedOutput() - if err != nil { - t.Logf("mount: %v\n%s", err, mount) - } else { - t.Logf("mount: %s", mount) - } - } -} - -func TestSelect(t *testing.T) { - err := unix.Select(0, nil, nil, nil, &unix.Timeval{Sec: 0, Usec: 0}) - if err != nil { - t.Fatalf("Select: %v", err) - } - - dur := 250 * time.Millisecond - tv := unix.NsecToTimeval(int64(dur)) - start := time.Now() - err = unix.Select(0, nil, nil, nil, &tv) - took := time.Since(start) - if err != nil { - t.Fatalf("Select: %v", err) - } - - // On some BSDs the actual timeout might also be slightly less than the requested. - // Add an acceptable margin to avoid flaky tests. - if took < dur*2/3 { - t.Errorf("Select: timeout should have been at least %v, got %v", dur, took) - } -} - -func TestSysctlRaw(t *testing.T) { - if runtime.GOOS == "openbsd" { - t.Skip("kern.proc.pid does not exist on OpenBSD") - } - - _, err := unix.SysctlRaw("kern.proc.pid", unix.Getpid()) - if err != nil { - t.Fatal(err) - } -} - -func TestSysctlUint32(t *testing.T) { - maxproc, err := unix.SysctlUint32("kern.maxproc") - if err != nil { - t.Fatal(err) - } - t.Logf("kern.maxproc: %v", maxproc) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 79e94767..1aabc560 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -199,7 +199,13 @@ func Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { return getxattr(link, attr, xattrPointer(dest), len(dest), 0, XATTR_NOFOLLOW) } -//sys setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) +//sys fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) + +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + return fgetxattr(fd, attr, xattrPointer(dest), len(dest), 0, 0) +} + +//sys setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) func Setxattr(path string, attr string, data []byte, flags int) (err error) { // The parameters for the OS X implementation vary slightly compared to the @@ -235,7 +241,13 @@ func Lsetxattr(link string, attr string, data []byte, flags int) (err error) { return setxattr(link, attr, xattrPointer(data), len(data), 0, flags|XATTR_NOFOLLOW) } -//sys removexattr(path string, attr string, options int) (err error) +//sys fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) + +func Fsetxattr(fd int, attr string, data []byte, flags int) (err error) { + return fsetxattr(fd, attr, xattrPointer(data), len(data), 0, 0) +} + +//sys 
removexattr(path string, attr string, options int) (err error) func Removexattr(path string, attr string) (err error) { // We wrap around and explicitly zero out the options provided to the OS X @@ -248,6 +260,12 @@ func Lremovexattr(link string, attr string) (err error) { return removexattr(link, attr, XATTR_NOFOLLOW) } +//sys fremovexattr(fd int, attr string, options int) (err error) + +func Fremovexattr(fd int, attr string) (err error) { + return fremovexattr(fd, attr, 0) +} + //sys listxattr(path string, dest *byte, size int, options int) (sz int, err error) func Listxattr(path string, dest []byte) (sz int, err error) { @@ -258,6 +276,12 @@ func Llistxattr(link string, dest []byte) (sz int, err error) { return listxattr(link, xattrPointer(dest), len(dest), XATTR_NOFOLLOW) } +//sys flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + return flistxattr(fd, xattrPointer(dest), len(dest), 0) +} + func setattrlistTimes(path string, times []Timespec, flags int) error { _p0, err := BytePtrFromString(path) if err != nil { @@ -313,11 +337,11 @@ func IoctlSetInt(fd int, req uint, value int) error { return ioctl(fd, req, uintptr(value)) } -func IoctlSetWinsize(fd int, req uint, value *Winsize) error { +func ioctlSetWinsize(fd int, req uint, value *Winsize) error { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } -func IoctlSetTermios(fd int, req uint, value *Termios) error { +func ioctlSetTermios(fd int, req uint, value *Termios) error { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } @@ -529,10 +553,6 @@ func Uname(uname *Utsname) error { // Watchevent // Waitevent // Modwatch -// Fgetxattr -// Fsetxattr -// Fremovexattr -// Flistxattr // Fsctl // Initgroups // Posix_spawn diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_test.go b/vendor/golang.org/x/sys/unix/syscall_darwin_test.go deleted file mode 100644 index 65691d5c..00000000 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_test.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package unix_test - -// stringsFromByteSlice converts a sequence of attributes to a []string. -// On Darwin, each entry is a NULL-terminated string. 
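Reviewer note on the Darwin hunks above: they add fd-based extended-attribute calls (Fgetxattr, Fsetxattr, Fremovexattr, Flistxattr) alongside the existing path-based ones. A usage sketch of the common two-call sizing pattern; passing a nil destination to ask for the required size is how the path-based calls behave, and the fd-based ones are assumed here to match:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        f, err := os.Open("example.txt") // hypothetical file
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            return
        }
        defer f.Close()

        // First call with nil to learn the size, then read the attribute list.
        sz, err := unix.Flistxattr(int(f.Fd()), nil)
        if err != nil || sz == 0 {
            fmt.Println("no extended attributes:", err)
            return
        }
        buf := make([]byte, sz)
        n, err := unix.Flistxattr(int(f.Fd()), buf)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            return
        }
        fmt.Printf("%q\n", buf[:n]) // NUL-separated attribute names on Darwin
    }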
-func stringsFromByteSlice(buf []byte) []string { - var result []string - off := 0 - for i, b := range buf { - if b == 0 { - result = append(result, string(buf[off:i])) - off = i + 1 - } - } - return result -} diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index b5072de2..79d125b3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -87,7 +87,7 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { if len > SizeofSockaddrAny { panic("RawSockaddrAny too small") } - sa, err = anyToSockaddr(&rsa) + sa, err = anyToSockaddr(fd, &rsa) if err != nil { Close(nfd) nfd = 0 @@ -143,11 +143,11 @@ func IoctlSetInt(fd int, req uint, value int) error { return ioctl(fd, req, uintptr(value)) } -func IoctlSetWinsize(fd int, req uint, value *Winsize) error { +func ioctlSetWinsize(fd int, req uint, value *Winsize) error { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } -func IoctlSetTermios(fd int, req uint, value *Termios) error { +func ioctlSetTermios(fd int, req uint, value *Termios) error { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index ba9df4ac..77a634c7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -13,7 +13,6 @@ package unix import ( - "strings" "unsafe" ) @@ -58,14 +57,21 @@ func nametomib(name string) (mib []_C_int, err error) { return buf[0 : n/siz], nil } -//sysnb pipe() (r int, w int, err error) - func Pipe(p []int) (err error) { + return Pipe2(p, 0) +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) error { if len(p) != 2 { return EINVAL } - p[0], p[1], err = pipe() - return + var pp [2]_C_int + err := pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return err } func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) { @@ -89,7 +95,7 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { if len > SizeofSockaddrAny { panic("RawSockaddrAny too small") } - sa, err = anyToSockaddr(&rsa) + sa, err = anyToSockaddr(fd, &rsa) if err != nil { Close(nfd) nfd = 0 @@ -134,225 +140,6 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { return ENOSYS } -// Derive extattr namespace and attribute name - -func xattrnamespace(fullattr string) (ns int, attr string, err error) { - s := strings.IndexByte(fullattr, '.') - if s == -1 { - return -1, "", ENOATTR - } - - namespace := fullattr[0:s] - attr = fullattr[s+1:] - - switch namespace { - case "user": - return EXTATTR_NAMESPACE_USER, attr, nil - case "system": - return EXTATTR_NAMESPACE_SYSTEM, attr, nil - default: - return -1, "", ENOATTR - } -} - -func initxattrdest(dest []byte, idx int) (d unsafe.Pointer) { - if len(dest) > idx { - return unsafe.Pointer(&dest[idx]) - } else { - return unsafe.Pointer(_zero) - } -} - -// FreeBSD implements its own syscalls to handle extended attributes - -func Getxattr(file string, attr string, dest []byte) (sz int, err error) { - d := initxattrdest(dest, 0) - destsize := len(dest) - - nsid, a, err := xattrnamespace(attr) - if err != nil { - return -1, err - } - - return ExtattrGetFile(file, nsid, a, uintptr(d), destsize) -} - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - d := initxattrdest(dest, 0) - destsize := len(dest) - - nsid, a, err := xattrnamespace(attr) - 
if err != nil { - return -1, err - } - - return ExtattrGetFd(fd, nsid, a, uintptr(d), destsize) -} - -func Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { - d := initxattrdest(dest, 0) - destsize := len(dest) - - nsid, a, err := xattrnamespace(attr) - if err != nil { - return -1, err - } - - return ExtattrGetLink(link, nsid, a, uintptr(d), destsize) -} - -// flags are unused on FreeBSD - -func Fsetxattr(fd int, attr string, data []byte, flags int) (err error) { - d := unsafe.Pointer(&data[0]) - datasiz := len(data) - - nsid, a, err := xattrnamespace(attr) - if err != nil { - return - } - - _, err = ExtattrSetFd(fd, nsid, a, uintptr(d), datasiz) - return -} - -func Setxattr(file string, attr string, data []byte, flags int) (err error) { - d := unsafe.Pointer(&data[0]) - datasiz := len(data) - - nsid, a, err := xattrnamespace(attr) - if err != nil { - return - } - - _, err = ExtattrSetFile(file, nsid, a, uintptr(d), datasiz) - return -} - -func Lsetxattr(link string, attr string, data []byte, flags int) (err error) { - d := unsafe.Pointer(&data[0]) - datasiz := len(data) - - nsid, a, err := xattrnamespace(attr) - if err != nil { - return - } - - _, err = ExtattrSetLink(link, nsid, a, uintptr(d), datasiz) - return -} - -func Removexattr(file string, attr string) (err error) { - nsid, a, err := xattrnamespace(attr) - if err != nil { - return - } - - err = ExtattrDeleteFile(file, nsid, a) - return -} - -func Fremovexattr(fd int, attr string) (err error) { - nsid, a, err := xattrnamespace(attr) - if err != nil { - return - } - - err = ExtattrDeleteFd(fd, nsid, a) - return -} - -func Lremovexattr(link string, attr string) (err error) { - nsid, a, err := xattrnamespace(attr) - if err != nil { - return - } - - err = ExtattrDeleteLink(link, nsid, a) - return -} - -func Listxattr(file string, dest []byte) (sz int, err error) { - d := initxattrdest(dest, 0) - destsiz := len(dest) - - // FreeBSD won't allow you to list xattrs from multiple namespaces - s := 0 - for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { - stmp, e := ExtattrListFile(file, nsid, uintptr(d), destsiz) - - /* Errors accessing system attrs are ignored so that - * we can implement the Linux-like behavior of omitting errors that - * we don't have read permissions on - * - * Linux will still error if we ask for user attributes on a file that - * we don't have read permissions on, so don't ignore those errors - */ - if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { - continue - } else if e != nil { - return s, e - } - - s += stmp - destsiz -= s - if destsiz < 0 { - destsiz = 0 - } - d = initxattrdest(dest, s) - } - - return s, nil -} - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - d := initxattrdest(dest, 0) - destsiz := len(dest) - - s := 0 - for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { - stmp, e := ExtattrListFd(fd, nsid, uintptr(d), destsiz) - if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { - continue - } else if e != nil { - return s, e - } - - s += stmp - destsiz -= s - if destsiz < 0 { - destsiz = 0 - } - d = initxattrdest(dest, s) - } - - return s, nil -} - -func Llistxattr(link string, dest []byte) (sz int, err error) { - d := initxattrdest(dest, 0) - destsiz := len(dest) - - s := 0 - for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { - stmp, e := ExtattrListLink(link, nsid, uintptr(d), destsiz) - if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { - continue 
- } else if e != nil { - return s, e - } - - s += stmp - destsiz -= s - if destsiz < 0 { - destsiz = 0 - } - d = initxattrdest(dest, s) - } - - return s, nil -} - //sys ioctl(fd int, req uint, arg uintptr) (err error) // ioctl itself should not be exposed directly, but additional get/set @@ -364,11 +151,11 @@ func IoctlSetInt(fd int, req uint, value int) error { return ioctl(fd, req, uintptr(value)) } -func IoctlSetWinsize(fd int, req uint, value *Winsize) error { +func ioctlSetWinsize(fd int, req uint, value *Winsize) error { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } -func IoctlSetTermios(fd int, req uint, value *Termios) error { +func ioctlSetTermios(fd int, req uint, value *Termios) error { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } @@ -602,14 +389,6 @@ func Uname(uname *Utsname) error { // Watchevent // Waitevent // Modwatch -// Getxattr -// Fgetxattr -// Setxattr -// Fsetxattr -// Removexattr -// Fremovexattr -// Listxattr -// Flistxattr // Fsctl // Initgroups // Posix_spawn diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_test.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_test.go deleted file mode 100644 index 0fec1a82..00000000 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_test.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd - -package unix_test - -import ( - "flag" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "runtime" - "testing" - - "golang.org/x/sys/unix" -) - -func TestSysctlUint64(t *testing.T) { - _, err := unix.SysctlUint64("vm.swap_total") - if err != nil { - t.Fatal(err) - } -} - -// FIXME: Infrastructure for launching tests in subprocesses stolen from openbsd_test.go - refactor? -// testCmd generates a proper command that, when executed, runs the test -// corresponding to the given key. - -type testProc struct { - fn func() // should always exit instead of returning - arg func(t *testing.T) string // generate argument for test - cleanup func(arg string) error // for instance, delete coredumps from testing pledge - success bool // whether zero-exit means success or failure -} - -var ( - testProcs = map[string]testProc{} - procName = "" - procArg = "" -) - -const ( - optName = "sys-unix-internal-procname" - optArg = "sys-unix-internal-arg" -) - -func init() { - flag.StringVar(&procName, optName, "", "internal use only") - flag.StringVar(&procArg, optArg, "", "internal use only") - -} - -func testCmd(procName string, procArg string) (*exec.Cmd, error) { - exe, err := filepath.Abs(os.Args[0]) - if err != nil { - return nil, err - } - cmd := exec.Command(exe, "-"+optName+"="+procName, "-"+optArg+"="+procArg) - cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr - return cmd, nil -} - -// ExitsCorrectly is a comprehensive, one-line-of-use wrapper for testing -// a testProc with a key. 
-func ExitsCorrectly(t *testing.T, procName string) { - s := testProcs[procName] - arg := "-" - if s.arg != nil { - arg = s.arg(t) - } - c, err := testCmd(procName, arg) - defer func(arg string) { - if err := s.cleanup(arg); err != nil { - t.Fatalf("Failed to run cleanup for %s %s %#v", procName, err, err) - } - }(arg) - if err != nil { - t.Fatalf("Failed to construct command for %s", procName) - } - if (c.Run() == nil) != s.success { - result := "succeed" - if !s.success { - result = "fail" - } - t.Fatalf("Process did not %s when it was supposed to", result) - } -} - -func TestMain(m *testing.M) { - flag.Parse() - if procName != "" { - t := testProcs[procName] - t.fn() - os.Stderr.WriteString("test function did not exit\n") - if t.success { - os.Exit(1) - } else { - os.Exit(0) - } - } - os.Exit(m.Run()) -} - -// end of infrastructure - -const testfile = "gocapmodetest" -const testfile2 = testfile + "2" - -func CapEnterTest() { - _, err := os.OpenFile(path.Join(procArg, testfile), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) - if err != nil { - panic(fmt.Sprintf("OpenFile: %s", err)) - } - - err = unix.CapEnter() - if err != nil { - panic(fmt.Sprintf("CapEnter: %s", err)) - } - - _, err = os.OpenFile(path.Join(procArg, testfile2), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) - if err == nil { - panic("OpenFile works!") - } - if err.(*os.PathError).Err != unix.ECAPMODE { - panic(fmt.Sprintf("OpenFile failed wrong: %s %#v", err, err)) - } - os.Exit(0) -} - -func makeTempDir(t *testing.T) string { - d, err := ioutil.TempDir("", "go_openat_test") - if err != nil { - t.Fatalf("TempDir failed: %s", err) - } - return d -} - -func removeTempDir(arg string) error { - err := os.RemoveAll(arg) - if err != nil && err.(*os.PathError).Err == unix.ENOENT { - return nil - } - return err -} - -func init() { - testProcs["cap_enter"] = testProc{ - CapEnterTest, - makeTempDir, - removeTempDir, - true, - } -} - -func TestCapEnter(t *testing.T) { - if runtime.GOARCH != "amd64" { - t.Skipf("skipping test on %s", runtime.GOARCH) - } - ExitsCorrectly(t, "cap_enter") -} - -func OpenatTest() { - f, err := os.Open(procArg) - if err != nil { - panic(err) - } - - err = unix.CapEnter() - if err != nil { - panic(fmt.Sprintf("CapEnter: %s", err)) - } - - fxx, err := unix.Openat(int(f.Fd()), "xx", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) - if err != nil { - panic(err) - } - unix.Close(fxx) - - // The right to open BASE/xx is not ambient - _, err = os.OpenFile(procArg+"/xx", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) - if err == nil { - panic("OpenFile succeeded") - } - if err.(*os.PathError).Err != unix.ECAPMODE { - panic(fmt.Sprintf("OpenFile failed wrong: %s %#v", err, err)) - } - - // Can't make a new directory either - err = os.Mkdir(procArg+"2", 0777) - if err == nil { - panic("MKdir succeeded") - } - if err.(*os.PathError).Err != unix.ECAPMODE { - panic(fmt.Sprintf("Mkdir failed wrong: %s %#v", err, err)) - } - - // Remove all caps except read and lookup. 
- r, err := unix.CapRightsInit([]uint64{unix.CAP_READ, unix.CAP_LOOKUP}) - if err != nil { - panic(fmt.Sprintf("CapRightsInit failed: %s %#v", err, err)) - } - err = unix.CapRightsLimit(f.Fd(), r) - if err != nil { - panic(fmt.Sprintf("CapRightsLimit failed: %s %#v", err, err)) - } - - // Check we can get the rights back again - r, err = unix.CapRightsGet(f.Fd()) - if err != nil { - panic(fmt.Sprintf("CapRightsGet failed: %s %#v", err, err)) - } - b, err := unix.CapRightsIsSet(r, []uint64{unix.CAP_READ, unix.CAP_LOOKUP}) - if err != nil { - panic(fmt.Sprintf("CapRightsIsSet failed: %s %#v", err, err)) - } - if !b { - panic(fmt.Sprintf("Unexpected rights")) - } - b, err = unix.CapRightsIsSet(r, []uint64{unix.CAP_READ, unix.CAP_LOOKUP, unix.CAP_WRITE}) - if err != nil { - panic(fmt.Sprintf("CapRightsIsSet failed: %s %#v", err, err)) - } - if b { - panic(fmt.Sprintf("Unexpected rights (2)")) - } - - // Can no longer create a file - _, err = unix.Openat(int(f.Fd()), "xx2", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) - if err == nil { - panic("Openat succeeded") - } - if err != unix.ENOTCAPABLE { - panic(fmt.Sprintf("OpenFileAt failed wrong: %s %#v", err, err)) - } - - // But can read an existing one - _, err = unix.Openat(int(f.Fd()), "xx", os.O_RDONLY, 0666) - if err != nil { - panic(fmt.Sprintf("Openat failed: %s %#v", err, err)) - } - - os.Exit(0) -} - -func init() { - testProcs["openat"] = testProc{ - OpenatTest, - makeTempDir, - removeTempDir, - true, - } -} - -func TestOpenat(t *testing.T) { - if runtime.GOARCH != "amd64" { - t.Skipf("skipping test on %s", runtime.GOARCH) - } - ExitsCorrectly(t, "openat") -} - -func TestCapRightsSetAndClear(t *testing.T) { - r, err := unix.CapRightsInit([]uint64{unix.CAP_READ, unix.CAP_WRITE, unix.CAP_PDWAIT}) - if err != nil { - t.Fatalf("CapRightsInit failed: %s", err) - } - - err = unix.CapRightsSet(r, []uint64{unix.CAP_EVENT, unix.CAP_LISTEN}) - if err != nil { - t.Fatalf("CapRightsSet failed: %s", err) - } - - b, err := unix.CapRightsIsSet(r, []uint64{unix.CAP_READ, unix.CAP_WRITE, unix.CAP_PDWAIT, unix.CAP_EVENT, unix.CAP_LISTEN}) - if err != nil { - t.Fatalf("CapRightsIsSet failed: %s", err) - } - if !b { - t.Fatalf("Wrong rights set") - } - - err = unix.CapRightsClear(r, []uint64{unix.CAP_READ, unix.CAP_PDWAIT}) - if err != nil { - t.Fatalf("CapRightsClear failed: %s", err) - } - - b, err = unix.CapRightsIsSet(r, []uint64{unix.CAP_WRITE, unix.CAP_EVENT, unix.CAP_LISTEN}) - if err != nil { - t.Fatalf("CapRightsIsSet failed: %s", err) - } - if !b { - t.Fatalf("Wrong rights set") - } -} - -// stringsFromByteSlice converts a sequence of attributes to a []string. -// On FreeBSD, each entry consists of a single byte containing the length -// of the attribute name, followed by the attribute name. -// The name is _not_ NULL-terminated. 
-func stringsFromByteSlice(buf []byte) []string { - var result []string - i := 0 - for i < len(buf) { - next := i + 1 + int(buf[i]) - result = append(result, string(buf[i+1:next])) - i = next - } - return result -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 04f38c53..02cf204a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -61,11 +61,11 @@ func IoctlSetInt(fd int, req uint, value int) error { return ioctl(fd, req, uintptr(value)) } -func IoctlSetWinsize(fd int, req uint, value *Winsize) error { +func ioctlSetWinsize(fd int, req uint, value *Winsize) error { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } -func IoctlSetTermios(fd int, req uint, value *Termios) error { +func ioctlSetTermios(fd int, req uint, value *Termios) error { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } @@ -148,8 +148,6 @@ func Unlink(path string) error { //sys Unlinkat(dirfd int, path string, flags int) (err error) -//sys utimes(path string, times *[2]Timeval) (err error) - func Utimes(path string, tv []Timeval) error { if tv == nil { err := utimensat(AT_FDCWD, path, nil, 0) @@ -207,20 +205,14 @@ func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error { return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) } -//sys futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) - func Futimesat(dirfd int, path string, tv []Timeval) error { - pathp, err := BytePtrFromString(path) - if err != nil { - return err - } if tv == nil { - return futimesat(dirfd, pathp, nil) + return futimesat(dirfd, path, nil) } if len(tv) != 2 { return EINVAL } - return futimesat(dirfd, pathp, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) + return futimesat(dirfd, path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } func Futimes(fd int, tv []Timeval) (err error) { @@ -497,6 +489,47 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrL2, nil } +// SockaddrRFCOMM implements the Sockaddr interface for AF_BLUETOOTH type sockets +// using the RFCOMM protocol. +// +// Server example: +// +// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) +// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{ +// Channel: 1, +// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00 +// }) +// _ = Listen(fd, 1) +// nfd, sa, _ := Accept(fd) +// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd) +// Read(nfd, buf) +// +// Client example: +// +// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) +// _ = Connect(fd, &SockaddrRFCOMM{ +// Channel: 1, +// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11 +// }) +// Write(fd, []byte(`hello`)) +type SockaddrRFCOMM struct { + // Addr represents a bluetooth address, byte ordering is little-endian. + Addr [6]uint8 + + // Channel is a designated bluetooth channel, only 1-30 are available for use. + // Since Linux 2.6.7 and further zero value is the first available channel. + Channel uint8 + + raw RawSockaddrRFCOMM +} + +func (sa *SockaddrRFCOMM) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Family = AF_BLUETOOTH + sa.raw.Channel = sa.Channel + sa.raw.Bdaddr = sa.Addr + return unsafe.Pointer(&sa.raw), SizeofSockaddrRFCOMM, nil +} + // SockaddrCAN implements the Sockaddr interface for AF_CAN type sockets. 
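Reviewer note on the SockaddrRFCOMM addition above: RFCOMM shares AF_BLUETOOTH with the existing L2CAP sockaddr, so an address returned by Accept cannot be decoded from the family alone; the anyToSockaddr change in the following hunk therefore queries the socket's protocol to pick the right raw type. A sketch of that lookup in isolation (illustrative only; fd would come from Socket/Accept on a Linux AF_BLUETOOTH socket):

    package main

    import "golang.org/x/sys/unix"

    // bluetoothProto reports which Bluetooth protocol a connected AF_BLUETOOTH
    // socket uses, mirroring the lookup anyToSockaddr performs below.
    func bluetoothProto(fd int) (string, error) {
        proto, err := unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_PROTOCOL)
        if err != nil {
            return "", err
        }
        switch proto {
        case unix.BTPROTO_L2CAP:
            return "l2cap", nil
        case unix.BTPROTO_RFCOMM:
            return "rfcomm", nil
        }
        return "unknown", nil
    }

    func main() {} // placeholder; see the RFCOMM doc comment above for real use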
// The RxID and TxID fields are used for transport protocol addressing in // (CAN_TP16, CAN_TP20, CAN_MCNET, and CAN_ISOTP), they can be left with @@ -659,7 +692,25 @@ func (sa *SockaddrVM) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrVM, nil } -func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { +type SockaddrXDP struct { + Flags uint16 + Ifindex uint32 + QueueID uint32 + SharedUmemFD uint32 + raw RawSockaddrXDP +} + +func (sa *SockaddrXDP) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Family = AF_XDP + sa.raw.Flags = sa.Flags + sa.raw.Ifindex = sa.Ifindex + sa.raw.Queue_id = sa.QueueID + sa.raw.Shared_umem_fd = sa.SharedUmemFD + + return unsafe.Pointer(&sa.raw), SizeofSockaddrXDP, nil +} + +func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_NETLINK: pp := (*RawSockaddrNetlink)(unsafe.Pointer(rsa)) @@ -736,6 +787,39 @@ func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { Port: pp.Port, } return sa, nil + case AF_BLUETOOTH: + proto, err := GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL) + if err != nil { + return nil, err + } + // only BTPROTO_L2CAP and BTPROTO_RFCOMM can accept connections + switch proto { + case BTPROTO_L2CAP: + pp := (*RawSockaddrL2)(unsafe.Pointer(rsa)) + sa := &SockaddrL2{ + PSM: pp.Psm, + CID: pp.Cid, + Addr: pp.Bdaddr, + AddrType: pp.Bdaddr_type, + } + return sa, nil + case BTPROTO_RFCOMM: + pp := (*RawSockaddrRFCOMM)(unsafe.Pointer(rsa)) + sa := &SockaddrRFCOMM{ + Channel: pp.Channel, + Addr: pp.Bdaddr, + } + return sa, nil + } + case AF_XDP: + pp := (*RawSockaddrXDP)(unsafe.Pointer(rsa)) + sa := &SockaddrXDP{ + Flags: pp.Flags, + Ifindex: pp.Ifindex, + QueueID: pp.Queue_id, + SharedUmemFD: pp.Shared_umem_fd, + } + return sa, nil } return nil, EAFNOSUPPORT } @@ -747,7 +831,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { if err != nil { return } - sa, err = anyToSockaddr(&rsa) + sa, err = anyToSockaddr(fd, &rsa) if err != nil { Close(nfd) nfd = 0 @@ -765,7 +849,7 @@ func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { if len > SizeofSockaddrAny { panic("RawSockaddrAny too small") } - sa, err = anyToSockaddr(&rsa) + sa, err = anyToSockaddr(fd, &rsa) if err != nil { Close(nfd) nfd = 0 @@ -779,7 +863,7 @@ func Getsockname(fd int) (sa Sockaddr, err error) { if err = getsockname(fd, &rsa, &len); err != nil { return } - return anyToSockaddr(&rsa) + return anyToSockaddr(fd, &rsa) } func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) { @@ -968,7 +1052,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from recvflags = int(msg.Flags) // source address is only specified if the socket is unconnected if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(&rsa) + from, err = anyToSockaddr(fd, &rsa) } return } @@ -1216,24 +1300,27 @@ func Mount(source string, target string, fstype string, flags uintptr, data stri //sys Adjtimex(buf *Timex) (state int, err error) //sys Chdir(path string) (err error) //sys Chroot(path string) (err error) +//sys ClockGetres(clockid int32, res *Timespec) (err error) //sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys Dup(oldfd int) (fd int, err error) //sys Dup3(oldfd int, newfd int, flags int) (err error) -//sysnb EpollCreate(size int) (fd int, err error) //sysnb EpollCreate1(flag int) (fd int, err error) //sysnb 
EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) //sys Eventfd(initval uint, flags int) (fd int, err error) = SYS_EVENTFD2 //sys Exit(code int) = SYS_EXIT_GROUP -//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fallocate(fd int, mode uint32, off int64, len int64) (err error) //sys Fchdir(fd int) (err error) //sys Fchmod(fd int, mode uint32) (err error) //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys fcntl(fd int, cmd int, arg int) (val int, err error) //sys Fdatasync(fd int) (err error) +//sys Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) +//sys Flistxattr(fd int, dest []byte) (sz int, err error) //sys Flock(fd int, how int) (err error) +//sys Fremovexattr(fd int, attr string) (err error) +//sys Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) //sys Fsync(fd int) (err error) //sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) @@ -1261,6 +1348,7 @@ func Getpgrp() (pid int) { //sys Llistxattr(path string, dest []byte) (sz int, err error) //sys Lremovexattr(path string, attr string) (err error) //sys Lsetxattr(path string, attr string, data []byte, flags int) (err error) +//sys MemfdCreate(name string, flags int) (fd int, err error) //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) @@ -1272,6 +1360,7 @@ func Getpgrp() (pid int) { //sys read(fd int, p []byte) (n int, err error) //sys Removexattr(path string, attr string) (err error) //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) +//sys Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) //sys RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) //sys Setdomainname(p []byte) (err error) //sys Sethostname(p []byte) (err error) @@ -1306,7 +1395,6 @@ func Setgid(uid int) (err error) { //sysnb Uname(buf *Utsname) (err error) //sys Unmount(target string, flags int) (err error) = SYS_UMOUNT2 //sys Unshare(flags int) (err error) -//sys Ustat(dev int, ubuf *Ustat_t) (err error) //sys write(fd int, p []byte) (n int, err error) //sys exitThread(code int) (err error) = SYS_EXIT //sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ @@ -1356,6 +1444,77 @@ func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) { return int(n), nil } +//sys faccessat(dirfd int, path string, mode uint32) (err error) + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + if flags & ^(AT_SYMLINK_NOFOLLOW|AT_EACCESS) != 0 { + return EINVAL + } + + // The Linux kernel faccessat system call does not take any flags. + // The glibc faccessat implements the flags itself; see + // https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/unix/sysv/linux/faccessat.c;hb=HEAD + // Because people naturally expect syscall.Faccessat to act + // like C faccessat, we do the same. + + if flags == 0 { + return faccessat(dirfd, path, mode) + } + + var st Stat_t + if err := Fstatat(dirfd, path, &st, flags&AT_SYMLINK_NOFOLLOW); err != nil { + return err + } + + mode &= 7 + if mode == 0 { + return nil + } + + var uid int + if flags&AT_EACCESS != 0 { + uid = Geteuid() + } else { + uid = Getuid() + } + + if uid == 0 { + if mode&1 == 0 { + // Root can read and write any file. 
+ return nil + } + if st.Mode&0111 != 0 { + // Root can execute any file that anybody can execute. + return nil + } + return EACCES + } + + var fmode uint32 + if uint32(uid) == st.Uid { + fmode = (st.Mode >> 6) & 7 + } else { + var gid int + if flags&AT_EACCESS != 0 { + gid = Getegid() + } else { + gid = Getgid() + } + + if uint32(gid) == st.Gid { + fmode = (st.Mode >> 3) & 7 + } else { + fmode = st.Mode & 7 + } + } + + if fmode&mode == mode { + return nil + } + + return EACCES +} + /* * Unimplemented */ @@ -1365,7 +1524,6 @@ func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) { // Brk // Capget // Capset -// ClockGetres // ClockNanosleep // ClockSettime // Clone @@ -1375,11 +1533,7 @@ func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) { // EpollPwait // EpollWaitOld // Execve -// Fgetxattr -// Flistxattr // Fork -// Fremovexattr -// Fsetxattr // Futex // GetKernelSyms // GetMempolicy diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index f17872f5..74bc098c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -50,6 +50,8 @@ func Pipe2(p []int, flags int) (err error) { // 64-bit file system and 32-bit uid calls // (386 default is 32-bit file system and 16-bit uid). //sys Dup2(oldfd int, newfd int) (err error) +//sysnb EpollCreate(size int) (fd int, err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64_64 //sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32 //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 @@ -77,12 +79,12 @@ func Pipe2(p []int, flags int) (err error) { //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 +//sys Ustat(dev int, ubuf *Ustat_t) (err error) //sysnb getgroups(n int, list *_Gid_t) (nn int, err error) = SYS_GETGROUPS32 //sysnb setgroups(n int, list *_Gid_t) (err error) = SYS_SETGROUPS32 //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) -//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Pause() (err error) func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { @@ -164,11 +166,11 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return newoffset, nil } -// Vsyscalls on amd64. 
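// Illustrative usage sketch (not part of the diff): the Faccessat wrapper added
// above emulates the AT_EACCESS and AT_SYMLINK_NOFOLLOW flags in user space,
// mirroring glibc, because the raw Linux faccessat syscall takes no flags.
// From the caller's side it behaves like C faccessat; the path below is a
// hypothetical example.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Check read+write access using the effective UID/GID, as glibc's
	// faccessat(AT_EACCESS) would. With flags == 0 the wrapper falls back to
	// the plain kernel syscall.
	err := unix.Faccessat(unix.AT_FDCWD, "testdata/config", unix.R_OK|unix.W_OK, unix.AT_EACCESS)
	if err != nil {
		fmt.Println("no effective-uid access:", err)
		return
	}
	fmt.Println("effective-uid access granted")
}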
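// Illustrative sketch (not part of the diff): anyToSockaddr now receives the
// socket fd so that, for AF_BLUETOOTH, it can query SO_PROTOCOL and decode the
// peer address as either SockaddrL2 or SockaddrRFCOMM (and likewise return
// SockaddrXDP for AF_XDP). For callers this simply means Accept returns the
// matching concrete type; the type switch below shows the shape of such a
// caller. The listener setup is omitted and the fd value is hypothetical.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func describePeer(listenFd int) {
	nfd, sa, err := unix.Accept(listenFd)
	if err != nil {
		fmt.Println("accept:", err)
		return
	}
	defer unix.Close(nfd)

	switch peer := sa.(type) {
	case *unix.SockaddrRFCOMM:
		fmt.Printf("RFCOMM peer %v on channel %d\n", peer.Addr, peer.Channel)
	case *unix.SockaddrL2:
		fmt.Printf("L2CAP peer %v, PSM %d\n", peer.Addr, peer.PSM)
	default:
		fmt.Printf("unexpected peer address type %T\n", sa)
	}
}

func main() {
	describePeer(3) // hypothetical listening Bluetooth socket fd
}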
+//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error) //sysnb Gettimeofday(tv *Timeval) (err error) //sysnb Time(t *Time_t) (tt Time_t, err error) - //sys Utime(path string, buf *Utimbuf) (err error) +//sys utimes(path string, times *[2]Timeval) (err error) // On x86 Linux, all the socket calls go through an extra indirection, // I think because the 5-register system call interface can't handle diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index d1211063..5f9b2454 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -7,6 +7,7 @@ package unix //sys Dup2(oldfd int, newfd int) (err error) +//sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) @@ -57,6 +58,7 @@ func Stat(path string, stat *Stat_t) (err error) { //sys Statfs(path string, buf *Statfs_t) (err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) +//sys Ustat(dev int, ubuf *Ustat_t) (err error) //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) @@ -75,6 +77,8 @@ func Stat(path string, stat *Stat_t) (err error) { //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) +//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error) + func Gettimeofday(tv *Timeval) (err error) { errno := gettimeofday(tv) if errno != 0 { @@ -96,6 +100,7 @@ func Time(t *Time_t) (tt Time_t, err error) { } //sys Utime(path string, buf *Utimbuf) (err error) +//sys utimes(path string, times *[2]Timeval) (err error) func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index c59f8588..3ec7a932 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -75,6 +75,8 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // 64-bit file system and 32-bit uid calls // (16-bit uid calls are not always supported in newer kernels) //sys Dup2(oldfd int, newfd int) (err error) +//sysnb EpollCreate(size int) (fd int, err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32 //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 //sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 @@ -86,6 +88,7 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { //sys Lchown(path string, uid int, gid int) (err error) = SYS_LCHOWN32 //sys Listen(s int, n int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 +//sys Pause() (err error) //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, 
err error) = SYS__NEWSELECT //sys Setfsgid(gid int) (err error) = SYS_SETFSGID32 @@ -97,11 +100,10 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 +//sys Ustat(dev int, ubuf *Ustat_t) (err error) -// Vsyscalls on amd64. +//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error) //sysnb Gettimeofday(tv *Timeval) (err error) -//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) -//sys Pause() (err error) func Time(t *Time_t) (Time_t, error) { var tv Timeval @@ -123,6 +125,8 @@ func Utime(path string, buf *Utimbuf) error { return Utimes(path, tv) } +//sys utimes(path string, times *[2]Timeval) (err error) + //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index a1e8a609..646f295a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -6,6 +6,15 @@ package unix +import "unsafe" + +func EpollCreate(size int) (fd int, err error) { + if size <= 0 { + return -1, EINVAL + } + return EpollCreate1(0) +} + //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) @@ -57,6 +66,11 @@ func Lstat(path string, stat *Stat_t) (err error) { //sys Statfs(path string, buf *Statfs_t) (err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + return ENOSYS +} + //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) @@ -85,6 +99,18 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: sec, Usec: usec} } +func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) { + if tv == nil { + return utimensat(dirfd, path, nil, 0) + } + + ts := []Timespec{ + NsecToTimespec(TimevalToNsec(tv[0])), + NsecToTimespec(TimevalToNsec(tv[1])), + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + func Time(t *Time_t) (Time_t, error) { var tv Timeval err := Gettimeofday(&tv) @@ -105,6 +131,18 @@ func Utime(path string, buf *Utimbuf) error { return Utimes(path, tv) } +func utimes(path string, tv *[2]Timeval) (err error) { + if tv == nil { + return utimensat(AT_FDCWD, path, nil, 0) + } + + ts := []Timespec{ + NsecToTimespec(TimevalToNsec(tv[0])), + NsecToTimespec(TimevalToNsec(tv[1])), + } + return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + func Pipe(p []int) (err error) { if len(p) != 2 { return EINVAL @@ -161,22 +199,6 @@ func Pause() (err error) { return } -// TODO(dfc): constants that should be in zsysnum_linux_arm64.go, remove -// these when the deprecated syscalls that the syscall package relies on -// are removed. 
-const ( - SYS_GETPGRP = 1060 - SYS_UTIMES = 1037 - SYS_FUTIMESAT = 1066 - SYS_PAUSE = 1061 - SYS_USTAT = 1070 - SYS_UTIME = 1063 - SYS_LCHOWN = 1032 - SYS_TIME = 1062 - SYS_EPOLL_CREATE = 1042 - SYS_EPOLL_WAIT = 1069 -) - func Poll(fds []PollFd, timeout int) (n int, err error) { var ts *Timespec if timeout >= 0 { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 090ed404..ad991031 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -8,6 +8,7 @@ package unix //sys Dup2(oldfd int, newfd int) (err error) +//sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) @@ -47,6 +48,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys Statfs(path string, buf *Statfs_t) (err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) +//sys Ustat(dev int, ubuf *Ustat_t) (err error) //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) @@ -65,6 +67,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) +//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error) //sysnb Gettimeofday(tv *Timeval) (err error) func Time(t *Time_t) (tt Time_t, err error) { @@ -80,6 +83,7 @@ func Time(t *Time_t) (tt Time_t, err error) { } //sys Utime(path string, buf *Utimbuf) (err error) +//sys utimes(path string, times *[2]Timeval) (err error) func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index 9e16cc9d..99e0e999 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -15,6 +15,8 @@ import ( func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) //sys Dup2(oldfd int, newfd int) (err error) +//sysnb EpollCreate(size int) (fd int, err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) //sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64 @@ -33,13 +35,12 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) - //sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) - //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err 
error) = SYS_TRUNCATE64 +//sys Ustat(dev int, ubuf *Ustat_t) (err error) //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) @@ -61,16 +62,17 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys Ioperm(from int, num int, on int) (err error) //sys Iopl(level int) (err error) +//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error) //sysnb Gettimeofday(tv *Timeval) (err error) //sysnb Time(t *Time_t) (tt Time_t, err error) +//sys Utime(path string, buf *Utimbuf) (err error) +//sys utimes(path string, times *[2]Timeval) (err error) //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 //sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 -//sys Utime(path string, buf *Utimbuf) (err error) -//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Pause() (err error) func Fstatfs(fd int, buf *Statfs_t) (err error) { @@ -122,14 +124,13 @@ func Pipe2(p []int, flags int) (err error) { return } +//sysnb pipe() (p1 int, p2 int, err error) + func Pipe(p []int) (err error) { if len(p) != 2 { return EINVAL } - var pp [2]_C_int - err = pipe2(&pp, 0) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + p[0], p[1], err = pipe() return } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 6fb8733d..6a38dfd5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -7,8 +7,9 @@ package unix -//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Dup2(oldfd int, newfd int) (err error) +//sysnb EpollCreate(size int) (fd int, err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) @@ -43,8 +44,8 @@ package unix //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, buf *Statfs_t) (err error) -//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) = SYS_SYNC_FILE_RANGE2 //sys Truncate(path string, length int64) (err error) +//sys Ustat(dev int, ubuf *Ustat_t) (err error) //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) @@ -63,10 +64,11 @@ package unix //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) +//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error) //sysnb Gettimeofday(tv *Timeval) (err error) //sysnb Time(t *Time_t) (tt Time_t, err error) - //sys Utime(path string, buf *Utimbuf) (err error) +//sys utimes(path string, times *[2]Timeval) (err error) func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} @@ -126,3 
+128,11 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { } return poll(&fds[0], len(fds), timeout) } + +//sys syncFileRange2(fd int, flags int, off int64, n int64) (err error) = SYS_SYNC_FILE_RANGE2 + +func SyncFileRange(fd int, off int64, n int64, flags int) error { + // The sync_file_range and sync_file_range2 syscalls differ only in the + // order of their arguments. + return syncFileRange2(fd, flags, off, n) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go new file mode 100644 index 00000000..512077fe --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -0,0 +1,212 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build riscv64,linux + +package unix + +import "unsafe" + +func EpollCreate(size int) (fd int, err error) { + if size <= 0 { + return -1, EINVAL + } + return EpollCreate1(0) +} + +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) +//sys Fstatfs(fd int, buf *Statfs_t) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (euid int) +//sysnb Getgid() (gid int) +//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) +//sysnb Getuid() (uid int) +//sys Listen(s int, n int) (err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + var ts *Timespec + if timeout != nil { + ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} + } + return Pselect(nfd, r, w, e, ts, nil) +} + +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) +//sys Setfsgid(gid int) (err error) +//sys Setfsuid(uid int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) + +func Stat(path string, stat *Stat_t) (err error) { + return Fstatat(AT_FDCWD, path, stat, 0) +} + +func Lchown(path string, uid int, gid int) (err error) { + return Fchownat(AT_FDCWD, path, uid, gid, AT_SYMLINK_NOFOLLOW) +} + +func Lstat(path string, stat *Stat_t) (err error) { + return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW) +} + +//sys Statfs(path string, buf *Statfs_t) (err error) +//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) +//sys Truncate(path string, length int64) (err error) + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + return ENOSYS +} + +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err 
error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) + +//sysnb Gettimeofday(tv *Timeval) (err error) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) { + if tv == nil { + return utimensat(dirfd, path, nil, 0) + } + + ts := []Timespec{ + NsecToTimespec(TimevalToNsec(tv[0])), + NsecToTimespec(TimevalToNsec(tv[1])), + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func Time(t *Time_t) (Time_t, error) { + var tv Timeval + err := Gettimeofday(&tv) + if err != nil { + return 0, err + } + if t != nil { + *t = Time_t(tv.Sec) + } + return Time_t(tv.Sec), nil +} + +func Utime(path string, buf *Utimbuf) error { + tv := []Timeval{ + {Sec: buf.Actime}, + {Sec: buf.Modtime}, + } + return Utimes(path, tv) +} + +func utimes(path string, tv *[2]Timeval) (err error) { + if tv == nil { + return utimensat(AT_FDCWD, path, nil, 0) + } + + ts := []Timespec{ + NsecToTimespec(TimevalToNsec(tv[0])), + NsecToTimespec(TimevalToNsec(tv[1])), + } + return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, 0) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +func (r *PtraceRegs) PC() uint64 { return r.Pc } + +func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint64(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint64(length) +} + +func InotifyInit() (fd int, err error) { + return InotifyInit1(0) +} + +func Dup2(oldfd int, newfd int) (err error) { + return Dup3(oldfd, newfd, 0) +} + +func Pause() (err error) { + _, _, e1 := Syscall6(SYS_PPOLL, 0, 0, 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func Poll(fds []PollFd, timeout int) (n int, err error) { + var ts *Timespec + if timeout >= 0 { + ts = 
new(Timespec) + *ts = NsecToTimespec(int64(timeout) * 1e6) + } + if len(fds) == 0 { + return ppoll(nil, 0, ts, nil) + } + return ppoll(&fds[0], len(fds), ts, nil) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index c0d86e72..6e4ee0cf 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -11,6 +11,7 @@ import ( ) //sys Dup2(oldfd int, newfd int) (err error) +//sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) @@ -44,9 +45,11 @@ import ( //sys Statfs(path string, buf *Statfs_t) (err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) +//sys Ustat(dev int, ubuf *Ustat_t) (err error) //sysnb getgroups(n int, list *_Gid_t) (nn int, err error) //sysnb setgroups(n int, list *_Gid_t) (err error) +//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error) //sysnb Gettimeofday(tv *Timeval) (err error) func Time(t *Time_t) (tt Time_t, err error) { @@ -62,6 +65,7 @@ func Time(t *Time_t) (tt Time_t, err error) { } //sys Utime(path string, buf *Utimbuf) (err error) +//sys utimes(path string, times *[2]Timeval) (err error) func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index 78c1e0df..72e64187 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -68,6 +68,7 @@ func Iopl(level int) (err error) { return ENOSYS } +//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error) //sysnb Gettimeofday(tv *Timeval) (err error) func Time(t *Time_t) (tt Time_t, err error) { @@ -83,6 +84,7 @@ func Time(t *Time_t) (tt Time_t, err error) { } //sys Utime(path string, buf *Utimbuf) (err error) +//sys utimes(path string, times *[2]Timeval) (err error) func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_test.go b/vendor/golang.org/x/sys/unix/syscall_linux_test.go deleted file mode 100644 index 7fd5e2a9..00000000 --- a/vendor/golang.org/x/sys/unix/syscall_linux_test.go +++ /dev/null @@ -1,386 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
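// Illustrative sketch (not part of the diff): arm64 and riscv64 lack the legacy
// futimesat and utimes syscalls, so the wrappers added above translate the
// [2]Timeval argument into a [2]Timespec and call utimensat instead. The
// conversion boils down to the helper below; timevalsToTimespecs is a
// hypothetical name used only for illustration.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func timevalsToTimespecs(tv [2]unix.Timeval) [2]unix.Timespec {
	return [2]unix.Timespec{
		unix.NsecToTimespec(unix.TimevalToNsec(tv[0])), // access time
		unix.NsecToTimespec(unix.TimevalToNsec(tv[1])), // modification time
	}
}

func main() {
	tv := [2]unix.Timeval{
		unix.NsecToTimeval(1500000000), // 1.5s after the epoch
		unix.NsecToTimeval(3000000000), // 3s after the epoch
	}
	ts := timevalsToTimespecs(tv)
	fmt.Println(ts[0], ts[1]) // {1 500000000} {3 0}
}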
- -// +build linux - -package unix_test - -import ( - "os" - "runtime" - "runtime/debug" - "testing" - "time" - - "golang.org/x/sys/unix" -) - -func TestIoctlGetInt(t *testing.T) { - f, err := os.Open("/dev/random") - if err != nil { - t.Fatalf("failed to open device: %v", err) - } - defer f.Close() - - v, err := unix.IoctlGetInt(int(f.Fd()), unix.RNDGETENTCNT) - if err != nil { - t.Fatalf("failed to perform ioctl: %v", err) - } - - t.Logf("%d bits of entropy available", v) -} - -func TestPpoll(t *testing.T) { - if runtime.GOOS == "android" { - t.Skip("mkfifo syscall is not available on android, skipping test") - } - - f, cleanup := mktmpfifo(t) - defer cleanup() - - const timeout = 100 * time.Millisecond - - ok := make(chan bool, 1) - go func() { - select { - case <-time.After(10 * timeout): - t.Errorf("Ppoll: failed to timeout after %d", 10*timeout) - case <-ok: - } - }() - - fds := []unix.PollFd{{Fd: int32(f.Fd()), Events: unix.POLLIN}} - timeoutTs := unix.NsecToTimespec(int64(timeout)) - n, err := unix.Ppoll(fds, &timeoutTs, nil) - ok <- true - if err != nil { - t.Errorf("Ppoll: unexpected error: %v", err) - return - } - if n != 0 { - t.Errorf("Ppoll: wrong number of events: got %v, expected %v", n, 0) - return - } -} - -func TestTime(t *testing.T) { - var ut unix.Time_t - ut2, err := unix.Time(&ut) - if err != nil { - t.Fatalf("Time: %v", err) - } - if ut != ut2 { - t.Errorf("Time: return value %v should be equal to argument %v", ut2, ut) - } - - var now time.Time - - for i := 0; i < 10; i++ { - ut, err = unix.Time(nil) - if err != nil { - t.Fatalf("Time: %v", err) - } - - now = time.Now() - - if int64(ut) == now.Unix() { - return - } - } - - t.Errorf("Time: return value %v should be nearly equal to time.Now().Unix() %v", ut, now.Unix()) -} - -func TestUtime(t *testing.T) { - defer chtmpdir(t)() - - touch(t, "file1") - - buf := &unix.Utimbuf{ - Modtime: 12345, - } - - err := unix.Utime("file1", buf) - if err != nil { - t.Fatalf("Utime: %v", err) - } - - fi, err := os.Stat("file1") - if err != nil { - t.Fatal(err) - } - - if fi.ModTime().Unix() != 12345 { - t.Errorf("Utime: failed to change modtime: expected %v, got %v", 12345, fi.ModTime().Unix()) - } -} - -func TestUtimesNanoAt(t *testing.T) { - defer chtmpdir(t)() - - symlink := "symlink1" - os.Remove(symlink) - err := os.Symlink("nonexisting", symlink) - if err != nil { - t.Fatal(err) - } - - ts := []unix.Timespec{ - {Sec: 1111, Nsec: 2222}, - {Sec: 3333, Nsec: 4444}, - } - err = unix.UtimesNanoAt(unix.AT_FDCWD, symlink, ts, unix.AT_SYMLINK_NOFOLLOW) - if err != nil { - t.Fatalf("UtimesNanoAt: %v", err) - } - - var st unix.Stat_t - err = unix.Lstat(symlink, &st) - if err != nil { - t.Fatalf("Lstat: %v", err) - } - if st.Atim != ts[0] { - t.Errorf("UtimesNanoAt: wrong atime: %v", st.Atim) - } - if st.Mtim != ts[1] { - t.Errorf("UtimesNanoAt: wrong mtime: %v", st.Mtim) - } -} - -func TestRlimitAs(t *testing.T) { - // disable GC during to avoid flaky test - defer debug.SetGCPercent(debug.SetGCPercent(-1)) - - var rlim unix.Rlimit - err := unix.Getrlimit(unix.RLIMIT_AS, &rlim) - if err != nil { - t.Fatalf("Getrlimit: %v", err) - } - var zero unix.Rlimit - if zero == rlim { - t.Fatalf("Getrlimit: got zero value %#v", rlim) - } - set := rlim - set.Cur = uint64(unix.Getpagesize()) - err = unix.Setrlimit(unix.RLIMIT_AS, &set) - if err != nil { - t.Fatalf("Setrlimit: set failed: %#v %v", set, err) - } - - // RLIMIT_AS was set to the page size, so mmap()'ing twice the page size - // should fail. See 'man 2 getrlimit'. 
- _, err = unix.Mmap(-1, 0, 2*unix.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_PRIVATE) - if err == nil { - t.Fatal("Mmap: unexpectedly suceeded after setting RLIMIT_AS") - } - - err = unix.Setrlimit(unix.RLIMIT_AS, &rlim) - if err != nil { - t.Fatalf("Setrlimit: restore failed: %#v %v", rlim, err) - } - - b, err := unix.Mmap(-1, 0, 2*unix.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_PRIVATE) - if err != nil { - t.Fatalf("Mmap: %v", err) - } - err = unix.Munmap(b) - if err != nil { - t.Fatalf("Munmap: %v", err) - } -} - -func TestSelect(t *testing.T) { - _, err := unix.Select(0, nil, nil, nil, &unix.Timeval{Sec: 0, Usec: 0}) - if err != nil { - t.Fatalf("Select: %v", err) - } - - dur := 150 * time.Millisecond - tv := unix.NsecToTimeval(int64(dur)) - start := time.Now() - _, err = unix.Select(0, nil, nil, nil, &tv) - took := time.Since(start) - if err != nil { - t.Fatalf("Select: %v", err) - } - - if took < dur { - t.Errorf("Select: timeout should have been at least %v, got %v", dur, took) - } -} - -func TestPselect(t *testing.T) { - _, err := unix.Pselect(0, nil, nil, nil, &unix.Timespec{Sec: 0, Nsec: 0}, nil) - if err != nil { - t.Fatalf("Pselect: %v", err) - } - - dur := 2500 * time.Microsecond - ts := unix.NsecToTimespec(int64(dur)) - start := time.Now() - _, err = unix.Pselect(0, nil, nil, nil, &ts, nil) - took := time.Since(start) - if err != nil { - t.Fatalf("Pselect: %v", err) - } - - if took < dur { - t.Errorf("Pselect: timeout should have been at least %v, got %v", dur, took) - } -} - -func TestSchedSetaffinity(t *testing.T) { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - var oldMask unix.CPUSet - err := unix.SchedGetaffinity(0, &oldMask) - if err != nil { - t.Fatalf("SchedGetaffinity: %v", err) - } - - var newMask unix.CPUSet - newMask.Zero() - if newMask.Count() != 0 { - t.Errorf("CpuZero: didn't zero CPU set: %v", newMask) - } - cpu := 1 - newMask.Set(cpu) - if newMask.Count() != 1 || !newMask.IsSet(cpu) { - t.Errorf("CpuSet: didn't set CPU %d in set: %v", cpu, newMask) - } - cpu = 5 - newMask.Set(cpu) - if newMask.Count() != 2 || !newMask.IsSet(cpu) { - t.Errorf("CpuSet: didn't set CPU %d in set: %v", cpu, newMask) - } - newMask.Clear(cpu) - if newMask.Count() != 1 || newMask.IsSet(cpu) { - t.Errorf("CpuClr: didn't clear CPU %d in set: %v", cpu, newMask) - } - - if runtime.NumCPU() < 2 { - t.Skip("skipping setaffinity tests on single CPU system") - } - if runtime.GOOS == "android" { - t.Skip("skipping setaffinity tests on android") - } - - err = unix.SchedSetaffinity(0, &newMask) - if err != nil { - t.Fatalf("SchedSetaffinity: %v", err) - } - - var gotMask unix.CPUSet - err = unix.SchedGetaffinity(0, &gotMask) - if err != nil { - t.Fatalf("SchedGetaffinity: %v", err) - } - - if gotMask != newMask { - t.Errorf("SchedSetaffinity: returned affinity mask does not match set affinity mask") - } - - // Restore old mask so it doesn't affect successive tests - err = unix.SchedSetaffinity(0, &oldMask) - if err != nil { - t.Fatalf("SchedSetaffinity: %v", err) - } -} - -func TestStatx(t *testing.T) { - var stx unix.Statx_t - err := unix.Statx(unix.AT_FDCWD, ".", 0, 0, &stx) - if err == unix.ENOSYS || err == unix.EPERM { - t.Skip("statx syscall is not available, skipping test") - } else if err != nil { - t.Fatalf("Statx: %v", err) - } - - defer chtmpdir(t)() - touch(t, "file1") - - var st unix.Stat_t - err = unix.Stat("file1", &st) - if err != nil { - t.Fatalf("Stat: %v", err) - } - - flags := unix.AT_STATX_SYNC_AS_STAT - err = unix.Statx(unix.AT_FDCWD, 
"file1", flags, unix.STATX_ALL, &stx) - if err != nil { - t.Fatalf("Statx: %v", err) - } - - if uint32(stx.Mode) != st.Mode { - t.Errorf("Statx: returned stat mode does not match Stat") - } - - ctime := unix.StatxTimestamp{Sec: int64(st.Ctim.Sec), Nsec: uint32(st.Ctim.Nsec)} - mtime := unix.StatxTimestamp{Sec: int64(st.Mtim.Sec), Nsec: uint32(st.Mtim.Nsec)} - - if stx.Ctime != ctime { - t.Errorf("Statx: returned stat ctime does not match Stat") - } - if stx.Mtime != mtime { - t.Errorf("Statx: returned stat mtime does not match Stat") - } - - err = os.Symlink("file1", "symlink1") - if err != nil { - t.Fatal(err) - } - - err = unix.Lstat("symlink1", &st) - if err != nil { - t.Fatalf("Lstat: %v", err) - } - - err = unix.Statx(unix.AT_FDCWD, "symlink1", flags, unix.STATX_BASIC_STATS, &stx) - if err != nil { - t.Fatalf("Statx: %v", err) - } - - // follow symlink, expect a regulat file - if stx.Mode&unix.S_IFREG == 0 { - t.Errorf("Statx: didn't follow symlink") - } - - err = unix.Statx(unix.AT_FDCWD, "symlink1", flags|unix.AT_SYMLINK_NOFOLLOW, unix.STATX_ALL, &stx) - if err != nil { - t.Fatalf("Statx: %v", err) - } - - // follow symlink, expect a symlink - if stx.Mode&unix.S_IFLNK == 0 { - t.Errorf("Statx: unexpectedly followed symlink") - } - if uint32(stx.Mode) != st.Mode { - t.Errorf("Statx: returned stat mode does not match Lstat") - } - - ctime = unix.StatxTimestamp{Sec: int64(st.Ctim.Sec), Nsec: uint32(st.Ctim.Nsec)} - mtime = unix.StatxTimestamp{Sec: int64(st.Mtim.Sec), Nsec: uint32(st.Mtim.Nsec)} - - if stx.Ctime != ctime { - t.Errorf("Statx: returned stat ctime does not match Lstat") - } - if stx.Mtime != mtime { - t.Errorf("Statx: returned stat mtime does not match Lstat") - } -} - -// stringsFromByteSlice converts a sequence of attributes to a []string. -// On Linux, each entry is a NULL-terminated string. 
-func stringsFromByteSlice(buf []byte) []string { - var result []string - off := 0 - for i, b := range buf { - if b == 0 { - result = append(result, string(buf[off:i])) - off = i + 1 - } - } - return result -} diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index e1a3baa2..639bcdef 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -93,6 +93,23 @@ func nametomib(name string) (mib []_C_int, err error) { return mib, nil } +func SysctlClockinfo(name string) (*Clockinfo, error) { + mib, err := sysctlmib(name) + if err != nil { + return nil, err + } + + n := uintptr(SizeofClockinfo) + buf := make([]byte, SizeofClockinfo) + if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil { + return nil, err + } + if n != SizeofClockinfo { + return nil, EIO + } + return (*Clockinfo)(unsafe.Pointer(&buf[0])), nil +} + //sysnb pipe() (fd1 int, fd2 int, err error) func Pipe(p []int) (err error) { if len(p) != 2 { @@ -145,11 +162,11 @@ func IoctlSetInt(fd int, req uint, value int) error { return ioctl(fd, req, uintptr(value)) } -func IoctlSetWinsize(fd int, req uint, value *Winsize) error { +func ioctlSetWinsize(fd int, req uint, value *Winsize) error { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } -func IoctlSetTermios(fd int, req uint, value *Termios) error { +func ioctlSetTermios(fd int, req uint, value *Termios) error { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } @@ -233,6 +250,19 @@ func Uname(uname *Utsname) error { //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) //sys Exit(code int) +//sys ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error) +//sys ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error) +//sys ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) +//sys ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) +//sys ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_POSIX_FADVISE //sys Fchdir(fd int) (err error) //sys Fchflags(fd int, flags int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 614fcf04..07e6669c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -113,11 +113,11 @@ func IoctlSetInt(fd int, req uint, value int) error { return ioctl(fd, req, uintptr(value)) } -func 
IoctlSetWinsize(fd int, req uint, value *Winsize) error { +func ioctlSetWinsize(fd int, req uint, value *Winsize) error { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } -func IoctlSetTermios(fd int, req uint, value *Termios) error { +func ioctlSetTermios(fd int, req uint, value *Termios) error { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } @@ -201,6 +201,7 @@ func Uname(uname *Utsname) error { //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) //sys Exit(code int) +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchdir(fd int) (err error) //sys Fchflags(fd int, flags int) (err error) //sys Fchmod(fd int, mode uint32) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index b7629529..53b80782 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -112,7 +112,7 @@ func Getsockname(fd int) (sa Sockaddr, err error) { if err = getsockname(fd, &rsa, &len); err != nil { return } - return anyToSockaddr(&rsa) + return anyToSockaddr(fd, &rsa) } // GetsockoptString returns the string value of the socket option opt for the @@ -314,7 +314,11 @@ func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error { // FcntlInt performs a fcntl syscall on fd with the provided command and argument. func FcntlInt(fd uintptr, cmd, arg int) (int, error) { - valptr, _, err := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) + valptr, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) + var err error + if errno != 0 { + err = errno + } return int(valptr), err } @@ -356,7 +360,7 @@ func Futimes(fd int, tv []Timeval) error { return futimesat(fd, nil, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } -func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { +func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_UNIX: pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) @@ -407,7 +411,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { if nfd == -1 { return } - sa, err = anyToSockaddr(&rsa) + sa, err = anyToSockaddr(fd, &rsa) if err != nil { Close(nfd) nfd = 0 @@ -444,7 +448,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from oobn = int(msg.Accrightslen) // source address is only specified if the socket is unconnected if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(&rsa) + from, err = anyToSockaddr(fd, &rsa) } return } @@ -536,11 +540,11 @@ func IoctlSetInt(fd int, req uint, value int) (err error) { return ioctl(fd, req, uintptr(value)) } -func IoctlSetWinsize(fd int, req uint, value *Winsize) (err error) { +func ioctlSetWinsize(fd int, req uint, value *Winsize) (err error) { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } -func IoctlSetTermios(fd int, req uint, value *Termios) (err error) { +func ioctlSetTermios(fd int, req uint, value *Termios) (err error) { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } @@ -595,6 +599,7 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { //sys Dup(fd int) (nfd int, err error) //sys Dup2(oldfd int, newfd int) (err error) //sys Exit(code int) +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchdir(fd int) (err error) //sys Fchmod(fd int, mode uint32) (err error) //sys Fchmodat(dirfd int, path string, 
mode uint32, flags int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_test.go b/vendor/golang.org/x/sys/unix/syscall_solaris_test.go deleted file mode 100644 index 57dba882..00000000 --- a/vendor/golang.org/x/sys/unix/syscall_solaris_test.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build solaris - -package unix_test - -import ( - "os/exec" - "testing" - "time" - - "golang.org/x/sys/unix" -) - -func TestSelect(t *testing.T) { - err := unix.Select(0, nil, nil, nil, &unix.Timeval{Sec: 0, Usec: 0}) - if err != nil { - t.Fatalf("Select: %v", err) - } - - dur := 150 * time.Millisecond - tv := unix.NsecToTimeval(int64(dur)) - start := time.Now() - err = unix.Select(0, nil, nil, nil, &tv) - took := time.Since(start) - if err != nil { - t.Fatalf("Select: %v", err) - } - - if took < dur { - t.Errorf("Select: timeout should have been at least %v, got %v", dur, took) - } -} - -func TestStatvfs(t *testing.T) { - if err := unix.Statvfs("", nil); err == nil { - t.Fatal(`Statvfs("") expected failure`) - } - - statvfs := unix.Statvfs_t{} - if err := unix.Statvfs("/", &statvfs); err != nil { - t.Errorf(`Statvfs("/") failed: %v`, err) - } - - if t.Failed() { - mount, err := exec.Command("mount").CombinedOutput() - if err != nil { - t.Logf("mount: %v\n%s", err, mount) - } else { - t.Logf("mount: %s", mount) - } - } -} diff --git a/vendor/golang.org/x/sys/unix/syscall_test.go b/vendor/golang.org/x/sys/unix/syscall_test.go deleted file mode 100644 index a8eef7cf..00000000 --- a/vendor/golang.org/x/sys/unix/syscall_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package unix_test - -import ( - "fmt" - "testing" - - "golang.org/x/sys/unix" -) - -func testSetGetenv(t *testing.T, key, value string) { - err := unix.Setenv(key, value) - if err != nil { - t.Fatalf("Setenv failed to set %q: %v", value, err) - } - newvalue, found := unix.Getenv(key) - if !found { - t.Fatalf("Getenv failed to find %v variable (want value %q)", key, value) - } - if newvalue != value { - t.Fatalf("Getenv(%v) = %q; want %q", key, newvalue, value) - } -} - -func TestEnv(t *testing.T) { - testSetGetenv(t, "TESTENV", "AVALUE") - // make sure TESTENV gets set to "", not deleted - testSetGetenv(t, "TESTENV", "") -} - -func TestItoa(t *testing.T) { - // Make most negative integer: 0x8000... - i := 1 - for i<<1 != 0 { - i <<= 1 - } - if i >= 0 { - t.Fatal("bad math") - } - s := unix.Itoa(i) - f := fmt.Sprint(i) - if s != f { - t.Fatalf("itoa(%d) = %s, want %s", i, s, f) - } -} - -func TestUname(t *testing.T) { - var utsname unix.Utsname - err := unix.Uname(&utsname) - if err != nil { - t.Fatalf("Uname: %v", err) - } - - t.Logf("OS: %s/%s %s", utsname.Sysname[:], utsname.Machine[:], utsname.Release[:]) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index b835bad0..13956b79 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
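// Illustrative sketch (not part of the diff): the Solaris FcntlInt change above
// stops wrapping a zero errno in a non-nil error, so callers can now rely on a
// nil error on success. A typical use is reading file-status flags; the path
// below is only an example.
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/etc/hosts")
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer f.Close()

	// F_GETFL returns the descriptor's file-status flags as an int.
	flags, err := unix.FcntlInt(f.Fd(), unix.F_GETFL, 0)
	if err != nil {
		fmt.Println("fcntl:", err)
		return
	}
	fmt.Printf("flags: %#x, nonblocking: %v\n", flags, flags&unix.O_NONBLOCK != 0)
}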
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix @@ -219,7 +219,7 @@ func Getpeername(fd int) (sa Sockaddr, err error) { if err = getpeername(fd, &rsa, &len); err != nil { return } - return anyToSockaddr(&rsa) + return anyToSockaddr(fd, &rsa) } func GetsockoptByte(fd, level, opt int) (value byte, err error) { @@ -291,7 +291,7 @@ func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { return } if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(&rsa) + from, err = anyToSockaddr(fd, &rsa) } return } diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_test.go b/vendor/golang.org/x/sys/unix/syscall_unix_test.go deleted file mode 100644 index ad09716a..00000000 --- a/vendor/golang.org/x/sys/unix/syscall_unix_test.go +++ /dev/null @@ -1,639 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package unix_test - -import ( - "flag" - "fmt" - "io/ioutil" - "net" - "os" - "os/exec" - "path/filepath" - "runtime" - "syscall" - "testing" - "time" - - "golang.org/x/sys/unix" -) - -// Tests that below functions, structures and constants are consistent -// on all Unix-like systems. -func _() { - // program scheduling priority functions and constants - var ( - _ func(int, int, int) error = unix.Setpriority - _ func(int, int) (int, error) = unix.Getpriority - ) - const ( - _ int = unix.PRIO_USER - _ int = unix.PRIO_PROCESS - _ int = unix.PRIO_PGRP - ) - - // termios constants - const ( - _ int = unix.TCIFLUSH - _ int = unix.TCIOFLUSH - _ int = unix.TCOFLUSH - ) - - // fcntl file locking structure and constants - var ( - _ = unix.Flock_t{ - Type: int16(0), - Whence: int16(0), - Start: int64(0), - Len: int64(0), - Pid: int32(0), - } - ) - const ( - _ = unix.F_GETLK - _ = unix.F_SETLK - _ = unix.F_SETLKW - ) -} - -func TestErrnoSignalName(t *testing.T) { - testErrors := []struct { - num syscall.Errno - name string - }{ - {syscall.EPERM, "EPERM"}, - {syscall.EINVAL, "EINVAL"}, - {syscall.ENOENT, "ENOENT"}, - } - - for _, te := range testErrors { - t.Run(fmt.Sprintf("%d/%s", te.num, te.name), func(t *testing.T) { - e := unix.ErrnoName(te.num) - if e != te.name { - t.Errorf("ErrnoName(%d) returned %s, want %s", te.num, e, te.name) - } - }) - } - - testSignals := []struct { - num syscall.Signal - name string - }{ - {syscall.SIGHUP, "SIGHUP"}, - {syscall.SIGPIPE, "SIGPIPE"}, - {syscall.SIGSEGV, "SIGSEGV"}, - } - - for _, ts := range testSignals { - t.Run(fmt.Sprintf("%d/%s", ts.num, ts.name), func(t *testing.T) { - s := unix.SignalName(ts.num) - if s != ts.name { - t.Errorf("SignalName(%d) returned %s, want %s", ts.num, s, ts.name) - } - }) - } -} - -// TestFcntlFlock tests whether the file locking structure matches -// the calling convention of each kernel. -func TestFcntlFlock(t *testing.T) { - name := filepath.Join(os.TempDir(), "TestFcntlFlock") - fd, err := unix.Open(name, unix.O_CREAT|unix.O_RDWR|unix.O_CLOEXEC, 0) - if err != nil { - t.Fatalf("Open failed: %v", err) - } - defer unix.Unlink(name) - defer unix.Close(fd) - flock := unix.Flock_t{ - Type: unix.F_RDLCK, - Start: 0, Len: 0, Whence: 1, - } - if err := unix.FcntlFlock(uintptr(fd), unix.F_GETLK, &flock); err != nil { - t.Fatalf("FcntlFlock failed: %v", err) - } -} - -// TestPassFD tests passing a file descriptor over a Unix socket. 
-// -// This test involved both a parent and child process. The parent -// process is invoked as a normal test, with "go test", which then -// runs the child process by running the current test binary with args -// "-test.run=^TestPassFD$" and an environment variable used to signal -// that the test should become the child process instead. -func TestPassFD(t *testing.T) { - if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") { - t.Skip("cannot exec subprocess on iOS, skipping test") - } - - if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" { - passFDChild() - return - } - - tempDir, err := ioutil.TempDir("", "TestPassFD") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM, 0) - if err != nil { - t.Fatalf("Socketpair: %v", err) - } - defer unix.Close(fds[0]) - defer unix.Close(fds[1]) - writeFile := os.NewFile(uintptr(fds[0]), "child-writes") - readFile := os.NewFile(uintptr(fds[1]), "parent-reads") - defer writeFile.Close() - defer readFile.Close() - - cmd := exec.Command(os.Args[0], "-test.run=^TestPassFD$", "--", tempDir) - cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} - if lp := os.Getenv("LD_LIBRARY_PATH"); lp != "" { - cmd.Env = append(cmd.Env, "LD_LIBRARY_PATH="+lp) - } - cmd.ExtraFiles = []*os.File{writeFile} - - out, err := cmd.CombinedOutput() - if len(out) > 0 || err != nil { - t.Fatalf("child process: %q, %v", out, err) - } - - c, err := net.FileConn(readFile) - if err != nil { - t.Fatalf("FileConn: %v", err) - } - defer c.Close() - - uc, ok := c.(*net.UnixConn) - if !ok { - t.Fatalf("unexpected FileConn type; expected UnixConn, got %T", c) - } - - buf := make([]byte, 32) // expect 1 byte - oob := make([]byte, 32) // expect 24 bytes - closeUnix := time.AfterFunc(5*time.Second, func() { - t.Logf("timeout reading from unix socket") - uc.Close() - }) - _, oobn, _, _, err := uc.ReadMsgUnix(buf, oob) - if err != nil { - t.Fatalf("ReadMsgUnix: %v", err) - } - closeUnix.Stop() - - scms, err := unix.ParseSocketControlMessage(oob[:oobn]) - if err != nil { - t.Fatalf("ParseSocketControlMessage: %v", err) - } - if len(scms) != 1 { - t.Fatalf("expected 1 SocketControlMessage; got scms = %#v", scms) - } - scm := scms[0] - gotFds, err := unix.ParseUnixRights(&scm) - if err != nil { - t.Fatalf("unix.ParseUnixRights: %v", err) - } - if len(gotFds) != 1 { - t.Fatalf("wanted 1 fd; got %#v", gotFds) - } - - f := os.NewFile(uintptr(gotFds[0]), "fd-from-child") - defer f.Close() - - got, err := ioutil.ReadAll(f) - want := "Hello from child process!\n" - if string(got) != want { - t.Errorf("child process ReadAll: %q, %v; want %q", got, err, want) - } -} - -// passFDChild is the child process used by TestPassFD. -func passFDChild() { - defer os.Exit(0) - - // Look for our fd. It should be fd 3, but we work around an fd leak - // bug here (http://golang.org/issue/2603) to let it be elsewhere. - var uc *net.UnixConn - for fd := uintptr(3); fd <= 10; fd++ { - f := os.NewFile(fd, "unix-conn") - var ok bool - netc, _ := net.FileConn(f) - uc, ok = netc.(*net.UnixConn) - if ok { - break - } - } - if uc == nil { - fmt.Println("failed to find unix fd") - return - } - - // Make a file f to send to our parent process on uc. - // We make it in tempDir, which our parent will clean up. 
- flag.Parse() - tempDir := flag.Arg(0) - f, err := ioutil.TempFile(tempDir, "") - if err != nil { - fmt.Printf("TempFile: %v", err) - return - } - - f.Write([]byte("Hello from child process!\n")) - f.Seek(0, 0) - - rights := unix.UnixRights(int(f.Fd())) - dummyByte := []byte("x") - n, oobn, err := uc.WriteMsgUnix(dummyByte, rights, nil) - if err != nil { - fmt.Printf("WriteMsgUnix: %v", err) - return - } - if n != 1 || oobn != len(rights) { - fmt.Printf("WriteMsgUnix = %d, %d; want 1, %d", n, oobn, len(rights)) - return - } -} - -// TestUnixRightsRoundtrip tests that UnixRights, ParseSocketControlMessage, -// and ParseUnixRights are able to successfully round-trip lists of file descriptors. -func TestUnixRightsRoundtrip(t *testing.T) { - testCases := [...][][]int{ - {{42}}, - {{1, 2}}, - {{3, 4, 5}}, - {{}}, - {{1, 2}, {3, 4, 5}, {}, {7}}, - } - for _, testCase := range testCases { - b := []byte{} - var n int - for _, fds := range testCase { - // Last assignment to n wins - n = len(b) + unix.CmsgLen(4*len(fds)) - b = append(b, unix.UnixRights(fds...)...) - } - // Truncate b - b = b[:n] - - scms, err := unix.ParseSocketControlMessage(b) - if err != nil { - t.Fatalf("ParseSocketControlMessage: %v", err) - } - if len(scms) != len(testCase) { - t.Fatalf("expected %v SocketControlMessage; got scms = %#v", len(testCase), scms) - } - for i, scm := range scms { - gotFds, err := unix.ParseUnixRights(&scm) - if err != nil { - t.Fatalf("ParseUnixRights: %v", err) - } - wantFds := testCase[i] - if len(gotFds) != len(wantFds) { - t.Fatalf("expected %v fds, got %#v", len(wantFds), gotFds) - } - for j, fd := range gotFds { - if fd != wantFds[j] { - t.Fatalf("expected fd %v, got %v", wantFds[j], fd) - } - } - } - } -} - -func TestRlimit(t *testing.T) { - var rlimit, zero unix.Rlimit - err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlimit) - if err != nil { - t.Fatalf("Getrlimit: save failed: %v", err) - } - if zero == rlimit { - t.Fatalf("Getrlimit: save failed: got zero value %#v", rlimit) - } - set := rlimit - set.Cur = set.Max - 1 - err = unix.Setrlimit(unix.RLIMIT_NOFILE, &set) - if err != nil { - t.Fatalf("Setrlimit: set failed: %#v %v", set, err) - } - var get unix.Rlimit - err = unix.Getrlimit(unix.RLIMIT_NOFILE, &get) - if err != nil { - t.Fatalf("Getrlimit: get failed: %v", err) - } - set = rlimit - set.Cur = set.Max - 1 - if set != get { - // Seems like Darwin requires some privilege to - // increase the soft limit of rlimit sandbox, though - // Setrlimit never reports an error. 
- switch runtime.GOOS { - case "darwin": - default: - t.Fatalf("Rlimit: change failed: wanted %#v got %#v", set, get) - } - } - err = unix.Setrlimit(unix.RLIMIT_NOFILE, &rlimit) - if err != nil { - t.Fatalf("Setrlimit: restore failed: %#v %v", rlimit, err) - } -} - -func TestSeekFailure(t *testing.T) { - _, err := unix.Seek(-1, 0, 0) - if err == nil { - t.Fatalf("Seek(-1, 0, 0) did not fail") - } - str := err.Error() // used to crash on Linux - t.Logf("Seek: %v", str) - if str == "" { - t.Fatalf("Seek(-1, 0, 0) return error with empty message") - } -} - -func TestDup(t *testing.T) { - file, err := ioutil.TempFile("", "TestDup") - if err != nil { - t.Fatalf("Tempfile failed: %v", err) - } - defer os.Remove(file.Name()) - defer file.Close() - f := int(file.Fd()) - - newFd, err := unix.Dup(f) - if err != nil { - t.Fatalf("Dup: %v", err) - } - - err = unix.Dup2(newFd, newFd+1) - if err != nil { - t.Fatalf("Dup2: %v", err) - } - - b1 := []byte("Test123") - b2 := make([]byte, 7) - _, err = unix.Write(newFd+1, b1) - if err != nil { - t.Fatalf("Write to dup2 fd failed: %v", err) - } - _, err = unix.Seek(f, 0, 0) - if err != nil { - t.Fatalf("Seek failed: %v", err) - } - _, err = unix.Read(f, b2) - if err != nil { - t.Fatalf("Read back failed: %v", err) - } - if string(b1) != string(b2) { - t.Errorf("Dup: stdout write not in file, expected %v, got %v", string(b1), string(b2)) - } -} - -func TestPoll(t *testing.T) { - if runtime.GOOS == "android" || - (runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64")) { - t.Skip("mkfifo syscall is not available on android and iOS, skipping test") - } - - f, cleanup := mktmpfifo(t) - defer cleanup() - - const timeout = 100 - - ok := make(chan bool, 1) - go func() { - select { - case <-time.After(10 * timeout * time.Millisecond): - t.Errorf("Poll: failed to timeout after %d milliseconds", 10*timeout) - case <-ok: - } - }() - - fds := []unix.PollFd{{Fd: int32(f.Fd()), Events: unix.POLLIN}} - n, err := unix.Poll(fds, timeout) - ok <- true - if err != nil { - t.Errorf("Poll: unexpected error: %v", err) - return - } - if n != 0 { - t.Errorf("Poll: wrong number of events: got %v, expected %v", n, 0) - return - } -} - -func TestGetwd(t *testing.T) { - fd, err := os.Open(".") - if err != nil { - t.Fatalf("Open .: %s", err) - } - defer fd.Close() - // These are chosen carefully not to be symlinks on a Mac - // (unlike, say, /var, /etc) - dirs := []string{"/", "/usr/bin"} - switch runtime.GOOS { - case "android": - dirs = []string{"/", "/system/bin"} - case "darwin": - switch runtime.GOARCH { - case "arm", "arm64": - d1, err := ioutil.TempDir("", "d1") - if err != nil { - t.Fatalf("TempDir: %v", err) - } - d2, err := ioutil.TempDir("", "d2") - if err != nil { - t.Fatalf("TempDir: %v", err) - } - dirs = []string{d1, d2} - } - } - oldwd := os.Getenv("PWD") - for _, d := range dirs { - err = os.Chdir(d) - if err != nil { - t.Fatalf("Chdir: %v", err) - } - pwd, err := unix.Getwd() - if err != nil { - t.Fatalf("Getwd in %s: %s", d, err) - } - os.Setenv("PWD", oldwd) - err = fd.Chdir() - if err != nil { - // We changed the current directory and cannot go back. - // Don't let the tests continue; they'll scribble - // all over some other directory. 
- fmt.Fprintf(os.Stderr, "fchdir back to dot failed: %s\n", err) - os.Exit(1) - } - if pwd != d { - t.Fatalf("Getwd returned %q want %q", pwd, d) - } - } -} - -func TestFstatat(t *testing.T) { - defer chtmpdir(t)() - - touch(t, "file1") - - var st1 unix.Stat_t - err := unix.Stat("file1", &st1) - if err != nil { - t.Fatalf("Stat: %v", err) - } - - var st2 unix.Stat_t - err = unix.Fstatat(unix.AT_FDCWD, "file1", &st2, 0) - if err != nil { - t.Fatalf("Fstatat: %v", err) - } - - if st1 != st2 { - t.Errorf("Fstatat: returned stat does not match Stat") - } - - err = os.Symlink("file1", "symlink1") - if err != nil { - t.Fatal(err) - } - - err = unix.Lstat("symlink1", &st1) - if err != nil { - t.Fatalf("Lstat: %v", err) - } - - err = unix.Fstatat(unix.AT_FDCWD, "symlink1", &st2, unix.AT_SYMLINK_NOFOLLOW) - if err != nil { - t.Fatalf("Fstatat: %v", err) - } - - if st1 != st2 { - t.Errorf("Fstatat: returned stat does not match Lstat") - } -} - -func TestFchmodat(t *testing.T) { - defer chtmpdir(t)() - - touch(t, "file1") - err := os.Symlink("file1", "symlink1") - if err != nil { - t.Fatal(err) - } - - mode := os.FileMode(0444) - err = unix.Fchmodat(unix.AT_FDCWD, "symlink1", uint32(mode), 0) - if err != nil { - t.Fatalf("Fchmodat: unexpected error: %v", err) - } - - fi, err := os.Stat("file1") - if err != nil { - t.Fatal(err) - } - - if fi.Mode() != mode { - t.Errorf("Fchmodat: failed to change file mode: expected %v, got %v", mode, fi.Mode()) - } - - mode = os.FileMode(0644) - didChmodSymlink := true - err = unix.Fchmodat(unix.AT_FDCWD, "symlink1", uint32(mode), unix.AT_SYMLINK_NOFOLLOW) - if err != nil { - if (runtime.GOOS == "android" || runtime.GOOS == "linux" || runtime.GOOS == "solaris") && err == unix.EOPNOTSUPP { - // Linux and Illumos don't support flags != 0 - didChmodSymlink = false - } else { - t.Fatalf("Fchmodat: unexpected error: %v", err) - } - } - - if !didChmodSymlink { - // Didn't change mode of the symlink. On Linux, the permissions - // of a symbolic link are always 0777 according to symlink(7) - mode = os.FileMode(0777) - } - - var st unix.Stat_t - err = unix.Lstat("symlink1", &st) - if err != nil { - t.Fatal(err) - } - - got := os.FileMode(st.Mode & 0777) - if got != mode { - t.Errorf("Fchmodat: failed to change symlink mode: expected %v, got %v", mode, got) - } -} - -func TestMkdev(t *testing.T) { - major := uint32(42) - minor := uint32(7) - dev := unix.Mkdev(major, minor) - - if unix.Major(dev) != major { - t.Errorf("Major(%#x) == %d, want %d", dev, unix.Major(dev), major) - } - if unix.Minor(dev) != minor { - t.Errorf("Minor(%#x) == %d, want %d", dev, unix.Minor(dev), minor) - } -} - -// mktmpfifo creates a temporary FIFO and provides a cleanup function. -func mktmpfifo(t *testing.T) (*os.File, func()) { - err := unix.Mkfifo("fifo", 0666) - if err != nil { - t.Fatalf("mktmpfifo: failed to create FIFO: %v", err) - } - - f, err := os.OpenFile("fifo", os.O_RDWR, 0666) - if err != nil { - os.Remove("fifo") - t.Fatalf("mktmpfifo: failed to open FIFO: %v", err) - } - - return f, func() { - f.Close() - os.Remove("fifo") - } -} - -// utilities taken from os/os_test.go - -func touch(t *testing.T, name string) { - f, err := os.Create(name) - if err != nil { - t.Fatal(err) - } - if err := f.Close(); err != nil { - t.Fatal(err) - } -} - -// chtmpdir changes the working directory to a new temporary directory and -// provides a cleanup function. Used when PWD is read-only. 
-func chtmpdir(t *testing.T) func() { - oldwd, err := os.Getwd() - if err != nil { - t.Fatalf("chtmpdir: %v", err) - } - d, err := ioutil.TempDir("", "test") - if err != nil { - t.Fatalf("chtmpdir: %v", err) - } - if err := os.Chdir(d); err != nil { - t.Fatalf("chtmpdir: %v", err) - } - return func() { - if err := os.Chdir(oldwd); err != nil { - t.Fatalf("chtmpdir: %v", err) - } - os.RemoveAll(d) - } -} diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go index 47b9011e..4a672f56 100644 --- a/vendor/golang.org/x/sys/unix/timestruct.go +++ b/vendor/golang.org/x/sys/unix/timestruct.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/timestruct_test.go b/vendor/golang.org/x/sys/unix/timestruct_test.go deleted file mode 100644 index 4215f46d..00000000 --- a/vendor/golang.org/x/sys/unix/timestruct_test.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2017 The Go Authors. All right reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package unix_test - -import ( - "testing" - "time" - "unsafe" - - "golang.org/x/sys/unix" -) - -func TestTimeToTimespec(t *testing.T) { - timeTests := []struct { - time time.Time - valid bool - }{ - {time.Unix(0, 0), true}, - {time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), true}, - {time.Date(2262, time.December, 31, 23, 0, 0, 0, time.UTC), false}, - {time.Unix(0x7FFFFFFF, 0), true}, - {time.Unix(0x80000000, 0), false}, - {time.Unix(0x7FFFFFFF, 1000000000), false}, - {time.Unix(0x7FFFFFFF, 999999999), true}, - {time.Unix(-0x80000000, 0), true}, - {time.Unix(-0x80000001, 0), false}, - {time.Date(2038, time.January, 19, 3, 14, 7, 0, time.UTC), true}, - {time.Date(2038, time.January, 19, 3, 14, 8, 0, time.UTC), false}, - {time.Date(1901, time.December, 13, 20, 45, 52, 0, time.UTC), true}, - {time.Date(1901, time.December, 13, 20, 45, 51, 0, time.UTC), false}, - } - - // Currently all targets have either int32 or int64 for Timespec.Sec. - // If there were a new target with unsigned or floating point type for - // it, this test must be adjusted. - have64BitTime := (unsafe.Sizeof(unix.Timespec{}.Sec) == 8) - for _, tt := range timeTests { - ts, err := unix.TimeToTimespec(tt.time) - tt.valid = tt.valid || have64BitTime - if tt.valid && err != nil { - t.Errorf("TimeToTimespec(%v): %v", tt.time, err) - } - if err == nil { - tstime := time.Unix(int64(ts.Sec), int64(ts.Nsec)) - if !tstime.Equal(tt.time) { - t.Errorf("TimeToTimespec(%v) is the time %v", tt.time, tstime) - } - } - } -} diff --git a/vendor/golang.org/x/sys/unix/types_aix.go b/vendor/golang.org/x/sys/unix/types_aix.go new file mode 100644 index 00000000..18fbddd5 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_aix.go @@ -0,0 +1,236 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore +// +build aix + +/* +Input to cgo -godefs. 
See also mkerrors.sh and mkall.sh +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + + +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics; for internal use. + +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong + PathMax = C.PATH_MAX +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +type off64 C.off64_t +type off C.off_t +type Mode_t C.mode_t + +// Time + +type Timespec C.struct_timespec + +type StTimespec C.struct_st_timespec + +type Timeval C.struct_timeval + +type Timeval32 C.struct_timeval32 + +type Timex C.struct_timex + +type Time_t C.time_t + +type Tms C.struct_tms + +type Utimbuf C.struct_utimbuf + +type Timezone C.struct_timezone + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit64 + +type Pid_t C.pid_t + +type _Gid_t C.gid_t + +type dev_t C.dev_t + +// Files + +type Stat_t C.struct_stat + +type StatxTimestamp C.struct_statx_timestamp + +type Statx_t C.struct_statx + +type Dirent C.struct_dirent + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Cmsghdr C.struct_cmsghdr + +type ICMPv6Filter C.struct_icmp6_filter + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type Linger C.struct_linger + +type Msghdr C.struct_msghdr + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr +) + +type IfMsgHdr C.struct_if_msghdr + +// Misc + +type FdSet C.fd_set + +type Utsname C.struct_utsname + +type Ustat_t C.struct_ustat + +type Sigset_t C.sigset_t + +const ( + AT_FDCWD = C.AT_FDCWD + AT_REMOVEDIR = C.AT_REMOVEDIR + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) + +// Terminal handling + +type Termios C.struct_termios + +type Termio C.struct_termio + +type Winsize C.struct_winsize + +//poll + +type PollFd struct { + Fd int32 + Events uint16 + Revents uint16 +} + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + 
POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) + +//flock_t + +type Flock_t C.struct_flock64 + +// Statfs + +type Fsid_t C.struct_fsid_t +type Fsid64_t C.struct_fsid64_t + +type Statfs_t C.struct_statfs + +const RNDGETENTCNT = 0x80045200 diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go index 0c633048..386d5f89 100644 --- a/vendor/golang.org/x/sys/unix/types_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/types_dragonfly.go @@ -100,23 +100,6 @@ type _Gid_t C.gid_t // Files -const ( // Directory mode bits - S_IFMT = C.S_IFMT - S_IFIFO = C.S_IFIFO - S_IFCHR = C.S_IFCHR - S_IFDIR = C.S_IFDIR - S_IFBLK = C.S_IFBLK - S_IFREG = C.S_IFREG - S_IFLNK = C.S_IFLNK - S_IFSOCK = C.S_IFSOCK - S_ISUID = C.S_ISUID - S_ISGID = C.S_ISGID - S_ISVTX = C.S_ISVTX - S_IRUSR = C.S_IRUSR - S_IWUSR = C.S_IWUSR - S_IXUSR = C.S_IXUSR -) - type Stat_t C.struct_stat type Statfs_t C.struct_statfs diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go index 4eb02cd4..e84a892d 100644 --- a/vendor/golang.org/x/sys/unix/types_freebsd.go +++ b/vendor/golang.org/x/sys/unix/types_freebsd.go @@ -189,23 +189,6 @@ type _Gid_t C.gid_t // Files -const ( // Directory mode bits - S_IFMT = C.S_IFMT - S_IFIFO = C.S_IFIFO - S_IFCHR = C.S_IFCHR - S_IFDIR = C.S_IFDIR - S_IFBLK = C.S_IFBLK - S_IFREG = C.S_IFREG - S_IFLNK = C.S_IFLNK - S_IFSOCK = C.S_IFSOCK - S_ISUID = C.S_ISUID - S_ISGID = C.S_ISGID - S_ISVTX = C.S_ISVTX - S_IRUSR = C.S_IRUSR - S_IWUSR = C.S_IWUSR - S_IXUSR = C.S_IXUSR -) - type Stat_t C.struct_stat8 type Statfs_t C.struct_statfs diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go index 1494aafc..c49621ce 100644 --- a/vendor/golang.org/x/sys/unix/types_netbsd.go +++ b/vendor/golang.org/x/sys/unix/types_netbsd.go @@ -279,3 +279,9 @@ type Sysctlnode C.struct_sysctlnode // Uname type Utsname C.struct_utsname + +// Clockinfo + +const SizeofClockinfo = C.sizeof_struct_clockinfo + +type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go index 649e5599..8f2fe704 100644 --- a/vendor/golang.org/x/sys/unix/types_openbsd.go +++ b/vendor/golang.org/x/sys/unix/types_openbsd.go @@ -101,23 +101,6 @@ type _Gid_t C.gid_t // Files -const ( // Directory mode bits - S_IFMT = C.S_IFMT - S_IFIFO = C.S_IFIFO - S_IFCHR = C.S_IFCHR - S_IFDIR = C.S_IFDIR - S_IFBLK = C.S_IFBLK - S_IFREG = C.S_IFREG - S_IFLNK = C.S_IFLNK - S_IFSOCK = C.S_IFSOCK - S_ISUID = C.S_ISUID - S_ISGID = C.S_ISGID - S_ISVTX = C.S_ISVTX - S_IRUSR = C.S_IRUSR - S_IWUSR = C.S_IWUSR - S_IXUSR = C.S_IXUSR -) - type Stat_t C.struct_stat type Statfs_t C.struct_statfs diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go index f7771554..8cef71bd 100644 --- a/vendor/golang.org/x/sys/unix/types_solaris.go +++ b/vendor/golang.org/x/sys/unix/types_solaris.go @@ -118,23 +118,6 @@ type _Gid_t C.gid_t // Files -const ( // Directory mode bits - S_IFMT = C.S_IFMT - S_IFIFO = C.S_IFIFO - S_IFCHR = C.S_IFCHR - S_IFDIR = C.S_IFDIR - S_IFBLK = C.S_IFBLK - S_IFREG = C.S_IFREG - S_IFLNK = C.S_IFLNK - S_IFSOCK = C.S_IFSOCK - S_ISUID = C.S_ISUID - S_ISGID = C.S_ISGID - S_ISVTX = C.S_ISVTX - S_IRUSR = C.S_IRUSR - S_IWUSR = C.S_IWUSR - S_IXUSR = C.S_IXUSR -) - type Stat_t C.struct_stat type Flock_t C.struct_flock diff --git 
a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go new file mode 100644 index 00000000..93049932 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -0,0 +1,231 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd netbsd + +package unix + +import ( + "strings" + "unsafe" +) + +// Derive extattr namespace and attribute name + +func xattrnamespace(fullattr string) (ns int, attr string, err error) { + s := strings.IndexByte(fullattr, '.') + if s == -1 { + return -1, "", ENOATTR + } + + namespace := fullattr[0:s] + attr = fullattr[s+1:] + + switch namespace { + case "user": + return EXTATTR_NAMESPACE_USER, attr, nil + case "system": + return EXTATTR_NAMESPACE_SYSTEM, attr, nil + default: + return -1, "", ENOATTR + } +} + +func initxattrdest(dest []byte, idx int) (d unsafe.Pointer) { + if len(dest) > idx { + return unsafe.Pointer(&dest[idx]) + } else { + return unsafe.Pointer(_zero) + } +} + +// FreeBSD and NetBSD implement their own syscalls to handle extended attributes + +func Getxattr(file string, attr string, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsize := len(dest) + + nsid, a, err := xattrnamespace(attr) + if err != nil { + return -1, err + } + + return ExtattrGetFile(file, nsid, a, uintptr(d), destsize) +} + +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsize := len(dest) + + nsid, a, err := xattrnamespace(attr) + if err != nil { + return -1, err + } + + return ExtattrGetFd(fd, nsid, a, uintptr(d), destsize) +} + +func Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsize := len(dest) + + nsid, a, err := xattrnamespace(attr) + if err != nil { + return -1, err + } + + return ExtattrGetLink(link, nsid, a, uintptr(d), destsize) +} + +// flags are unused on FreeBSD + +func Fsetxattr(fd int, attr string, data []byte, flags int) (err error) { + d := unsafe.Pointer(&data[0]) + datasiz := len(data) + + nsid, a, err := xattrnamespace(attr) + if err != nil { + return + } + + _, err = ExtattrSetFd(fd, nsid, a, uintptr(d), datasiz) + return +} + +func Setxattr(file string, attr string, data []byte, flags int) (err error) { + d := unsafe.Pointer(&data[0]) + datasiz := len(data) + + nsid, a, err := xattrnamespace(attr) + if err != nil { + return + } + + _, err = ExtattrSetFile(file, nsid, a, uintptr(d), datasiz) + return +} + +func Lsetxattr(link string, attr string, data []byte, flags int) (err error) { + d := unsafe.Pointer(&data[0]) + datasiz := len(data) + + nsid, a, err := xattrnamespace(attr) + if err != nil { + return + } + + _, err = ExtattrSetLink(link, nsid, a, uintptr(d), datasiz) + return +} + +func Removexattr(file string, attr string) (err error) { + nsid, a, err := xattrnamespace(attr) + if err != nil { + return + } + + err = ExtattrDeleteFile(file, nsid, a) + return +} + +func Fremovexattr(fd int, attr string) (err error) { + nsid, a, err := xattrnamespace(attr) + if err != nil { + return + } + + err = ExtattrDeleteFd(fd, nsid, a) + return +} + +func Lremovexattr(link string, attr string) (err error) { + nsid, a, err := xattrnamespace(attr) + if err != nil { + return + } + + err = ExtattrDeleteLink(link, nsid, a) + return +} + +func Listxattr(file string, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsiz := len(dest) + + // FreeBSD 
won't allow you to list xattrs from multiple namespaces + s := 0 + for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { + stmp, e := ExtattrListFile(file, nsid, uintptr(d), destsiz) + + /* Errors accessing system attrs are ignored so that + * we can implement the Linux-like behavior of omitting errors that + * we don't have read permissions on + * + * Linux will still error if we ask for user attributes on a file that + * we don't have read permissions on, so don't ignore those errors + */ + if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + continue + } else if e != nil { + return s, e + } + + s += stmp + destsiz -= s + if destsiz < 0 { + destsiz = 0 + } + d = initxattrdest(dest, s) + } + + return s, nil +} + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsiz := len(dest) + + s := 0 + for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { + stmp, e := ExtattrListFd(fd, nsid, uintptr(d), destsiz) + if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + continue + } else if e != nil { + return s, e + } + + s += stmp + destsiz -= s + if destsiz < 0 { + destsiz = 0 + } + d = initxattrdest(dest, s) + } + + return s, nil +} + +func Llistxattr(link string, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsiz := len(dest) + + s := 0 + for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { + stmp, e := ExtattrListLink(link, nsid, uintptr(d), destsiz) + if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + continue + } else if e != nil { + return s, e + } + + s += stmp + destsiz -= s + if destsiz < 0 { + destsiz = 0 + } + d = initxattrdest(dest, s) + } + + return s, nil +} diff --git a/vendor/golang.org/x/sys/unix/xattr_test.go b/vendor/golang.org/x/sys/unix/xattr_test.go deleted file mode 100644 index b8b28d0c..00000000 --- a/vendor/golang.org/x/sys/unix/xattr_test.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin freebsd linux - -package unix_test - -import ( - "os" - "runtime" - "strings" - "testing" - - "golang.org/x/sys/unix" -) - -func TestXattr(t *testing.T) { - defer chtmpdir(t)() - - f := "xattr1" - touch(t, f) - - xattrName := "user.test" - xattrDataSet := "gopher" - err := unix.Setxattr(f, xattrName, []byte(xattrDataSet), 0) - if err == unix.ENOTSUP || err == unix.EOPNOTSUPP { - t.Skip("filesystem does not support extended attributes, skipping test") - } else if err != nil { - t.Fatalf("Setxattr: %v", err) - } - - // find size - size, err := unix.Listxattr(f, nil) - if err != nil { - t.Fatalf("Listxattr: %v", err) - } - - if size <= 0 { - t.Fatalf("Listxattr returned an empty list of attributes") - } - - buf := make([]byte, size) - read, err := unix.Listxattr(f, buf) - if err != nil { - t.Fatalf("Listxattr: %v", err) - } - - xattrs := stringsFromByteSlice(buf[:read]) - - xattrWant := xattrName - if runtime.GOOS == "freebsd" { - // On FreeBSD, the namespace is stored separately from the xattr - // name and Listxattr doesn't return the namespace prefix. 
- xattrWant = strings.TrimPrefix(xattrWant, "user.") - } - found := false - for _, name := range xattrs { - if name == xattrWant { - found = true - } - } - - if !found { - t.Errorf("Listxattr did not return previously set attribute '%s'", xattrName) - } - - // find size - size, err = unix.Getxattr(f, xattrName, nil) - if err != nil { - t.Fatalf("Getxattr: %v", err) - } - - if size <= 0 { - t.Fatalf("Getxattr returned an empty attribute") - } - - xattrDataGet := make([]byte, size) - _, err = unix.Getxattr(f, xattrName, xattrDataGet) - if err != nil { - t.Fatalf("Getxattr: %v", err) - } - - got := string(xattrDataGet) - if got != xattrDataSet { - t.Errorf("Getxattr: expected attribute value %s, got %s", xattrDataSet, got) - } - - err = unix.Removexattr(f, xattrName) - if err != nil { - t.Fatalf("Removexattr: %v", err) - } - - n := "nonexistent" - err = unix.Lsetxattr(n, xattrName, []byte(xattrDataSet), 0) - if err != unix.ENOENT { - t.Errorf("Lsetxattr: expected %v on non-existent file, got %v", unix.ENOENT, err) - } - - _, err = unix.Lgetxattr(n, xattrName, nil) - if err != unix.ENOENT { - t.Errorf("Lgetxattr: %v", err) - } - - s := "symlink1" - err = os.Symlink(n, s) - if err != nil { - t.Fatal(err) - } - - err = unix.Lsetxattr(s, xattrName, []byte(xattrDataSet), 0) - if err != nil { - // Linux and Android doen't support xattrs on symlinks according - // to xattr(7), so just test that we get the proper error. - if (runtime.GOOS != "linux" && runtime.GOOS != "android") || err != unix.EPERM { - t.Fatalf("Lsetxattr: %v", err) - } - } -} diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go new file mode 100644 index 00000000..4b7b9650 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go @@ -0,0 +1,1372 @@ +// mkerrors.sh -maix32 +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build ppc,aix + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -maix32 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_BYPASS = 0x19 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_INTF = 0x14 + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x1e + AF_NDD = 0x17 + AF_NETWARE = 0x16 + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_RIF = 0x15 + AF_ROUTE = 0x11 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ALTWERASE = 0x400000 + ARPHRD_802_3 = 0x6 + ARPHRD_802_5 = 0x6 + ARPHRD_ETHER = 0x1 + ARPHRD_FDDI = 0x1 + B0 = 0x0 + B110 = 0x3 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2400 = 0xb + B300 = 0x7 + B38400 = 0xf + B4800 = 0xc + B50 = 0x1 + B600 = 0x8 + B75 = 0x2 + B9600 = 0xd + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x1000 + BSDLY = 0x1000 + CAP_AACCT = 0x6 + CAP_ARM_APPLICATION = 0x5 + CAP_BYPASS_RAC_VMM = 0x3 + CAP_CLEAR = 0x0 + CAP_CREDENTIALS = 0x7 + CAP_EFFECTIVE = 0x1 + CAP_EWLM_AGENT = 0x4 + CAP_INHERITABLE = 0x2 + CAP_MAXIMUM = 0x7 + CAP_NUMA_ATTACH = 0x2 + CAP_PERMITTED = 0x3 + CAP_PROPAGATE = 0x1 + CAP_PROPOGATE = 0x1 + CAP_SET = 0x1 + CBAUD = 0xf + CFLUSH = 0xf + CIBAUD = 0xf0000 + CLOCAL = 0x800 + CLOCK_MONOTONIC = 0xa + CLOCK_PROCESS_CPUTIME_ID = 0xb + CLOCK_REALTIME = 0x9 + CLOCK_THREAD_CPUTIME_ID = 0xc + CR0 = 0x0 + CR1 = 0x100 + CR2 = 0x200 + CR3 = 0x300 + CRDLY = 0x300 + CREAD = 0x80 + CS5 = 0x0 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIOCGIFCONF = -0x3ff796dc + CSIZE = 0x30 + CSMAP_DIR = "/usr/lib/nls/csmap/" + CSTART = '\021' + CSTOP = '\023' + CSTOPB = 0x40 + CSUSP = 0x1a + ECHO = 0x8 + ECHOCTL = 0x20000 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x80000 + ECHONL = 0x40 + ECHOPRT = 0x40000 + ECH_ICMPID = 0x2 + ETHERNET_CSMACD = 0x6 + EVENP = 0x80 + EXCONTINUE = 0x0 + EXDLOK = 0x3 + EXIO = 0x2 + EXPGIO = 0x0 + EXRESUME = 0x2 + EXRETURN = 0x1 + EXSIG = 0x4 + EXTA = 0xe + EXTB = 0xf + EXTRAP = 0x1 + EYEC_RTENTRYA = 0x257274656e747241 + EYEC_RTENTRYF = 0x257274656e747246 + E_ACC = 0x0 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0xfffe + FF0 = 0x0 + FF1 = 0x2000 + FFDLY = 0x2000 + FLUSHBAND = 0x40 + FLUSHLOW = 0x8 + FLUSHO = 0x100000 + FLUSHR = 0x1 + FLUSHRW = 0x3 + FLUSHW = 0x2 + F_CLOSEM = 0xa + F_DUP2FD = 0xe + F_DUPFD = 0x0 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x5 + F_GETLK64 = 0xb + F_GETOWN = 0x8 + F_LOCK = 0x1 + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x6 + F_SETLK64 = 0xc + F_SETLKW = 0x7 + F_SETLKW64 = 0xd + F_SETOWN = 0x9 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_TSTLK = 0xf + F_ULOCK = 0x0 + F_UNLCK = 0x3 + F_WRLCK = 0x2 + HUPCL = 0x400 + IBSHIFT = 0x10 + ICANON = 0x2 + ICMP6_FILTER = 0x26 + ICMP6_SEC_SEND_DEL = 0x46 + ICMP6_SEC_SEND_GET = 0x47 + ICMP6_SEC_SEND_SET = 0x44 + ICMP6_SEC_SEND_SET_CGA_ADDR = 0x45 + ICRNL = 0x100 + IEXTEN = 0x200000 + IFA_FIRSTALIAS = 0x2000 + IFA_ROUTE = 0x1 + IFF_64BIT = 0x4000000 + IFF_ALLCAST = 0x20000 + IFF_ALLMULTI = 0x200 + IFF_BPF = 0x8000000 + IFF_BRIDGE = 0x40000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x80c52 + IFF_CHECKSUM_OFFLOAD = 0x10000000 + IFF_D1 = 0x8000 + IFF_D2 = 0x4000 + IFF_D3 = 0x2000 + IFF_D4 = 0x1000 + IFF_DEBUG = 0x4 + IFF_DEVHEALTH = 0x4000 + IFF_DO_HW_LOOPBACK = 0x10000 + IFF_GROUP_ROUTING = 0x2000000 + IFF_IFBUFMGT = 0x800000 + IFF_LINK0 = 0x100000 + IFF_LINK1 = 0x200000 + IFF_LINK2 = 0x400000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x80000 
+ IFF_NOARP = 0x80 + IFF_NOECHO = 0x800 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_PSEG = 0x40000000 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_SNAP = 0x8000 + IFF_TCP_DISABLE_CKSUM = 0x20000000 + IFF_TCP_NOCKSUM = 0x1000000 + IFF_UP = 0x1 + IFF_VIPA = 0x80000000 + IFNAMSIZ = 0x10 + IFO_FLUSH = 0x1 + IFT_1822 = 0x2 + IFT_AAL5 = 0x31 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ATM = 0x25 + IFT_CEPT = 0x13 + IFT_CLUSTER = 0x3e + IFT_DS3 = 0x1e + IFT_EON = 0x19 + IFT_ETHER = 0x6 + IFT_FCS = 0x3a + IFT_FDDI = 0xf + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_GIFTUNNEL = 0x3c + IFT_HDH1822 = 0x3 + IFT_HF = 0x3d + IFT_HIPPI = 0x2f + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IB = 0xc7 + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88026 = 0xa + IFT_LAPB = 0x10 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_NSIP = 0x1b + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PPP = 0x17 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PTPSERIAL = 0x16 + IFT_RS232 = 0x21 + IFT_SDLC = 0x11 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SN = 0x38 + IFT_SONET = 0x27 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SP = 0x39 + IFT_STARLAN = 0xb + IFT_T1 = 0x12 + IFT_TUNNEL = 0x3b + IFT_ULTRA = 0x1d + IFT_V35 = 0x2d + IFT_VIPA = 0x37 + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x10000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_USE = 0x1 + IPPROTO_AH = 0x33 + IPPROTO_BIP = 0x53 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GIF = 0x8c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_LOCAL = 0x3f + IPPROTO_MAX = 0x100 + IPPROTO_MH = 0x87 + IPPROTO_NONE = 0x3b + IPPROTO_PUP = 0xc + IPPROTO_QOS = 0x2d + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPV6_ADDRFORM = 0x16 + IPV6_ADDR_PREFERENCES = 0x4a + IPV6_ADD_MEMBERSHIP = 0xc + IPV6_AIXRAWSOCKET = 0x39 + IPV6_CHECKSUM = 0x27 + IPV6_DONTFRAG = 0x2d + IPV6_DROP_MEMBERSHIP = 0xd + IPV6_DSTOPTS = 0x36 + IPV6_FLOWINFO_FLOWLABEL = 0xffffff + IPV6_FLOWINFO_PRIFLOW = 0xfffffff + IPV6_FLOWINFO_PRIORITY = 0xf000000 + IPV6_FLOWINFO_SRFLAG = 0x10000000 + IPV6_FLOWINFO_VERSION = 0xf0000000 + IPV6_HOPLIMIT = 0x28 + IPV6_HOPOPTS = 0x34 + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MIPDSTOPTS = 0x36 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_NOPROBE = 0x1c + IPV6_PATHMTU = 0x2e + IPV6_PKTINFO = 0x21 + IPV6_PKTOPTIONS = 0x24 + IPV6_PRIORITY_10 = 0xa000000 + IPV6_PRIORITY_11 = 0xb000000 + IPV6_PRIORITY_12 = 0xc000000 + 
IPV6_PRIORITY_13 = 0xd000000 + IPV6_PRIORITY_14 = 0xe000000 + IPV6_PRIORITY_15 = 0xf000000 + IPV6_PRIORITY_8 = 0x8000000 + IPV6_PRIORITY_9 = 0x9000000 + IPV6_PRIORITY_BULK = 0x4000000 + IPV6_PRIORITY_CONTROL = 0x7000000 + IPV6_PRIORITY_FILLER = 0x1000000 + IPV6_PRIORITY_INTERACTIVE = 0x6000000 + IPV6_PRIORITY_RESERVED1 = 0x3000000 + IPV6_PRIORITY_RESERVED2 = 0x5000000 + IPV6_PRIORITY_UNATTENDED = 0x2000000 + IPV6_PRIORITY_UNCHARACTERIZED = 0x0 + IPV6_RECVDSTOPTS = 0x38 + IPV6_RECVHOPLIMIT = 0x29 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVHOPS = 0x22 + IPV6_RECVIF = 0x1e + IPV6_RECVPATHMTU = 0x2f + IPV6_RECVPKTINFO = 0x23 + IPV6_RECVRTHDR = 0x33 + IPV6_RECVSRCRT = 0x1d + IPV6_RECVTCLASS = 0x2a + IPV6_RTHDR = 0x32 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RTHDR_TYPE_2 = 0x2 + IPV6_SENDIF = 0x1f + IPV6_SRFLAG_LOOSE = 0x0 + IPV6_SRFLAG_STRICT = 0x10000000 + IPV6_TCLASS = 0x2b + IPV6_TOKEN_LENGTH = 0x40 + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2c + IPV6_V6ONLY = 0x25 + IPV6_VERSION = 0x60000000 + IP_ADDRFORM = 0x16 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x3c + IP_BLOCK_SOURCE = 0x3a + IP_BROADCAST_IF = 0x10 + IP_CACHE_LINE_SIZE = 0x80 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DHCPMODE = 0x11 + IP_DONTFRAG = 0x19 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x3d + IP_FINDPMTU = 0x1a + IP_HDRINCL = 0x2 + IP_INC_MEMBERSHIPS = 0x14 + IP_INIT_MEMBERSHIP = 0x14 + IP_MAXPACKET = 0xffff + IP_MF = 0x2000 + IP_MSS = 0x240 + IP_MULTICAST_HOPS = 0xa + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OPT = 0x1b + IP_OPTIONS = 0x1 + IP_PMTUAGE = 0x1b + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVIFINFO = 0xf + IP_RECVINTERFACE = 0x20 + IP_RECVMACHDR = 0xe + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVTTL = 0x22 + IP_RETOPTS = 0x8 + IP_SOURCE_FILTER = 0x48 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x3b + IP_UNICAST_HOPS = 0x4 + ISIG = 0x1 + ISTRIP = 0x20 + IUCLC = 0x800 + IXANY = 0x1000 + IXOFF = 0x400 + IXON = 0x200 + I_FLUSH = 0x20005305 + LNOFLSH = 0x8000 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ANON = 0x10 + MAP_ANONYMOUS = 0x10 + MAP_FILE = 0x0 + MAP_FIXED = 0x100 + MAP_PRIVATE = 0x2 + MAP_SHARED = 0x1 + MAP_TYPE = 0xf0 + MAP_VARIABLE = 0x0 + MCL_CURRENT = 0x100 + MCL_FUTURE = 0x200 + MSG_ANY = 0x4 + MSG_ARGEXT = 0x400 + MSG_BAND = 0x2 + MSG_COMPAT = 0x8000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_EOR = 0x8 + MSG_HIPRI = 0x1 + MSG_MAXIOVLEN = 0x10 + MSG_MPEG2 = 0x80 + MSG_NONBLOCK = 0x4000 + MSG_NOSIGNAL = 0x100 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x200 + MS_ASYNC = 0x10 + MS_EINTR = 0x80 + MS_INVALIDATE = 0x40 + MS_PER_SEC = 0x3e8 + MS_SYNC = 0x20 + NL0 = 0x0 + NL1 = 0x4000 + NL2 = 0x8000 + NL3 = 0xc000 + NLDLY = 0x4000 + NOFLSH = 0x80 + NOFLUSH = 0x80000000 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x2 + ONLCR = 0x4 + ONLRET = 0x20 + ONOCR = 0x10 + ONOEOT = 0x80000 + OPOST = 0x1 + OXTABS = 0x40000 + O_ACCMODE = 0x23 + O_APPEND = 0x8 + O_CIO = 0x80 + O_CIOR = 0x800000000 + O_CLOEXEC = 0x800000 + O_CREAT = 0x100 + O_DEFER = 0x2000 + O_DELAY = 0x4000 + O_DIRECT = 0x8000000 + O_DIRECTORY = 0x80000 + O_DSYNC = 0x400000 + O_EFSOFF = 0x400000000 + O_EFSON = 0x200000000 + O_EXCL = 0x400 + O_EXEC = 0x20 + O_LARGEFILE = 0x4000000 + 
O_NDELAY = 0x8000 + O_NOCACHE = 0x100000 + O_NOCTTY = 0x800 + O_NOFOLLOW = 0x1000000 + O_NONBLOCK = 0x4 + O_NONE = 0x3 + O_NSHARE = 0x10000 + O_RAW = 0x100000000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSHARE = 0x1000 + O_RSYNC = 0x200000 + O_SEARCH = 0x20 + O_SNAPSHOT = 0x40 + O_SYNC = 0x10 + O_TRUNC = 0x200 + O_TTY_INIT = 0x0 + O_WRONLY = 0x1 + PARENB = 0x100 + PAREXT = 0x100000 + PARMRK = 0x8 + PARODD = 0x200 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_64BIT = 0x20 + PR_ADDR = 0x2 + PR_ARGEXT = 0x400 + PR_ATOMIC = 0x1 + PR_CONNREQUIRED = 0x4 + PR_FASTHZ = 0x5 + PR_INP = 0x40 + PR_INTRLEVEL = 0x8000 + PR_MLS = 0x100 + PR_MLS_1_LABEL = 0x200 + PR_NOEOR = 0x4000 + PR_RIGHTS = 0x10 + PR_SLOWHZ = 0x2 + PR_WANTRCVD = 0x8 + RLIMIT_AS = 0x6 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x9 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DOWNSTREAM = 0x100 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTC_IA64 = 0x3 + RTC_POWER = 0x1 + RTC_POWER_PC = 0x2 + RTF_ACTIVE_DGD = 0x1000000 + RTF_BCE = 0x80000 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_BUL = 0x2000 + RTF_CLONE = 0x10000 + RTF_CLONED = 0x20000 + RTF_CLONING = 0x100 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FREE_IN_PROG = 0x4000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MASK = 0x80 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_PERMANENT6 = 0x8000000 + RTF_PINNED = 0x100000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_SMALLMTU = 0x40000 + RTF_STATIC = 0x800 + RTF_STOPSRCH = 0x2000000 + RTF_UNREACHABLE = 0x10000000 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_EXPIRE = 0xf + RTM_GET = 0x4 + RTM_GETNEXT = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTLOST = 0x10 + RTM_RTTUNIT = 0xf4240 + RTM_SAMEADDR = 0x12 + RTM_SET = 0x13 + RTM_VERSION = 0x2 + RTM_VERSION_GR = 0x4 + RTM_VERSION_GR_COMPAT = 0x3 + RTM_VERSION_POLICY = 0x5 + RTM_VERSION_POLICY_EXT = 0x6 + RTM_VERSION_POLICY_PRFN = 0x7 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_RIGHTS = 0x1 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIGMAX64 = 0xff + SIGQUEUE_MAX = 0x20 + SIOCADDIFVIPA = 0x20006942 + SIOCADDMTU = -0x7ffb9690 + SIOCADDMULTI = -0x7fdf96cf + SIOCADDNETID = -0x7fd796a9 + SIOCADDRT = -0x7fcf8df6 + SIOCAIFADDR = -0x7fbf96e6 + SIOCATMARK = 0x40047307 + SIOCDARP = -0x7fb396e0 + SIOCDELIFVIPA = 0x20006943 + SIOCDELMTU = -0x7ffb968f + SIOCDELMULTI = -0x7fdf96ce + SIOCDELPMTU = -0x7fd78ff6 + SIOCDELRT = -0x7fcf8df5 + SIOCDIFADDR = -0x7fd796e7 + SIOCDNETOPT = -0x3ffe9680 + SIOCDX25XLATE = -0x7fd7969b + SIOCFIFADDR = -0x7fdf966d + SIOCGARP = -0x3fb396da + SIOCGETMTUS = 0x2000696f + SIOCGETSGCNT = 
-0x3feb8acc + SIOCGETVIFCNT = -0x3feb8acd + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = -0x3fd796df + SIOCGIFADDRS = 0x2000698c + SIOCGIFBAUDRATE = -0x3fd79693 + SIOCGIFBRDADDR = -0x3fd796dd + SIOCGIFCONF = -0x3ff796bb + SIOCGIFCONFGLOB = -0x3ff79670 + SIOCGIFDSTADDR = -0x3fd796de + SIOCGIFFLAGS = -0x3fd796ef + SIOCGIFGIDLIST = 0x20006968 + SIOCGIFHWADDR = -0x3fab966b + SIOCGIFMETRIC = -0x3fd796e9 + SIOCGIFMTU = -0x3fd796aa + SIOCGIFNETMASK = -0x3fd796db + SIOCGIFOPTIONS = -0x3fd796d6 + SIOCGISNO = -0x3fd79695 + SIOCGLOADF = -0x3ffb967e + SIOCGLOWAT = 0x40047303 + SIOCGNETOPT = -0x3ffe96a5 + SIOCGNETOPT1 = -0x3fdf967f + SIOCGNMTUS = 0x2000696e + SIOCGPGRP = 0x40047309 + SIOCGSIZIFCONF = 0x4004696a + SIOCGSRCFILTER = -0x3fe796cb + SIOCGTUNEPHASE = -0x3ffb9676 + SIOCGX25XLATE = -0x3fd7969c + SIOCIFATTACH = -0x7fdf9699 + SIOCIFDETACH = -0x7fdf969a + SIOCIFGETPKEY = -0x7fdf969b + SIOCIF_ATM_DARP = -0x7fdf9683 + SIOCIF_ATM_DUMPARP = -0x7fdf9685 + SIOCIF_ATM_GARP = -0x7fdf9682 + SIOCIF_ATM_IDLE = -0x7fdf9686 + SIOCIF_ATM_SARP = -0x7fdf9681 + SIOCIF_ATM_SNMPARP = -0x7fdf9687 + SIOCIF_ATM_SVC = -0x7fdf9684 + SIOCIF_ATM_UBR = -0x7fdf9688 + SIOCIF_DEVHEALTH = -0x7ffb966c + SIOCIF_IB_ARP_INCOMP = -0x7fdf9677 + SIOCIF_IB_ARP_TIMER = -0x7fdf9678 + SIOCIF_IB_CLEAR_PINFO = -0x3fdf966f + SIOCIF_IB_DEL_ARP = -0x7fdf967f + SIOCIF_IB_DEL_PINFO = -0x3fdf9670 + SIOCIF_IB_DUMP_ARP = -0x7fdf9680 + SIOCIF_IB_GET_ARP = -0x7fdf967e + SIOCIF_IB_GET_INFO = -0x3f879675 + SIOCIF_IB_GET_STATS = -0x3f879672 + SIOCIF_IB_NOTIFY_ADDR_REM = -0x3f87966a + SIOCIF_IB_RESET_STATS = -0x3f879671 + SIOCIF_IB_RESIZE_CQ = -0x7fdf9679 + SIOCIF_IB_SET_ARP = -0x7fdf967d + SIOCIF_IB_SET_PKEY = -0x7fdf967c + SIOCIF_IB_SET_PORT = -0x7fdf967b + SIOCIF_IB_SET_QKEY = -0x7fdf9676 + SIOCIF_IB_SET_QSIZE = -0x7fdf967a + SIOCLISTIFVIPA = 0x20006944 + SIOCSARP = -0x7fb396e2 + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = -0x7fd796f4 + SIOCSIFADDRORI = -0x7fdb9673 + SIOCSIFBRDADDR = -0x7fd796ed + SIOCSIFDSTADDR = -0x7fd796f2 + SIOCSIFFLAGS = -0x7fd796f0 + SIOCSIFGIDLIST = 0x20006969 + SIOCSIFMETRIC = -0x7fd796e8 + SIOCSIFMTU = -0x7fd796a8 + SIOCSIFNETDUMP = -0x7fd796e4 + SIOCSIFNETMASK = -0x7fd796ea + SIOCSIFOPTIONS = -0x7fd796d7 + SIOCSIFSUBCHAN = -0x7fd796e5 + SIOCSISNO = -0x7fd79694 + SIOCSLOADF = -0x3ffb967d + SIOCSLOWAT = 0x80047302 + SIOCSNETOPT = -0x7ffe96a6 + SIOCSPGRP = 0x80047308 + SIOCSX25XLATE = -0x7fd7969d + SOCK_CONN_DGRAM = 0x6 + SOCK_DGRAM = 0x2 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x400 + SO_ACCEPTCONN = 0x2 + SO_AUDIT = 0x8000 + SO_BROADCAST = 0x20 + SO_CKSUMRECV = 0x800 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_KERNACCEPT = 0x2000 + SO_LINGER = 0x80 + SO_NOMULTIPATH = 0x4000 + SO_NOREUSEADDR = 0x1000 + SO_OOBINLINE = 0x100 + SO_PEERID = 0x1009 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMPNS = 0x100a + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_USE_IFBUFS = 0x400 + S_BANDURG = 0x400 + S_EMODFMT = 0x3c000000 + S_ENFMT = 0x400 + S_ERROR = 0x100 + S_HANGUP = 0x200 + S_HIPRI = 0x2 + S_ICRYPTO = 0x80000 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFJOURNAL = 0x10000 + S_IFLNK = 0xa000 + S_IFMPX = 0x2200 + S_IFMT = 0xf000 + S_IFPDIR = 0x4000000 + S_IFPSDIR = 0x8000000 + S_IFPSSDIR = 0xc000000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 
+ S_IFSYSEA = 0x30000000 + S_INPUT = 0x1 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_ITCB = 0x1000000 + S_ITP = 0x800000 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXACL = 0x2000000 + S_IXATTR = 0x40000 + S_IXGRP = 0x8 + S_IXINTERFACE = 0x100000 + S_IXMOD = 0x40000000 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + S_MSG = 0x8 + S_OUTPUT = 0x4 + S_RDBAND = 0x20 + S_RDNORM = 0x10 + S_RESERVED1 = 0x20000 + S_RESERVED2 = 0x200000 + S_RESERVED3 = 0x400000 + S_RESERVED4 = 0x80000000 + S_RESFMT1 = 0x10000000 + S_RESFMT10 = 0x34000000 + S_RESFMT11 = 0x38000000 + S_RESFMT12 = 0x3c000000 + S_RESFMT2 = 0x14000000 + S_RESFMT3 = 0x18000000 + S_RESFMT4 = 0x1c000000 + S_RESFMT5 = 0x20000000 + S_RESFMT6 = 0x24000000 + S_RESFMT7 = 0x28000000 + S_RESFMT8 = 0x2c000000 + S_WRBAND = 0x80 + S_WRNORM = 0x40 + TAB0 = 0x0 + TAB1 = 0x400 + TAB2 = 0x800 + TAB3 = 0xc00 + TABDLY = 0xc00 + TCFLSH = 0x540c + TCGETA = 0x5405 + TCGETS = 0x5401 + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_24DAYS_WORTH_OF_SLOWTICKS = 0x3f4800 + TCP_ACLADD = 0x23 + TCP_ACLBIND = 0x26 + TCP_ACLCLEAR = 0x22 + TCP_ACLDEL = 0x24 + TCP_ACLDENY = 0x8 + TCP_ACLFLUSH = 0x21 + TCP_ACLGID = 0x1 + TCP_ACLLS = 0x25 + TCP_ACLSUBNET = 0x4 + TCP_ACLUID = 0x2 + TCP_CWND_DF = 0x16 + TCP_CWND_IF = 0x15 + TCP_DELAY_ACK_FIN = 0x2 + TCP_DELAY_ACK_SYN = 0x1 + TCP_FASTNAME = 0x101080a + TCP_KEEPCNT = 0x13 + TCP_KEEPIDLE = 0x11 + TCP_KEEPINTVL = 0x12 + TCP_LSPRIV = 0x29 + TCP_LUID = 0x20 + TCP_MAXBURST = 0x8 + TCP_MAXDF = 0x64 + TCP_MAXIF = 0x64 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAXWINDOWSCALE = 0xe + TCP_MAX_SACK = 0x4 + TCP_MSS = 0x5b4 + TCP_NODELAY = 0x1 + TCP_NODELAYACK = 0x14 + TCP_NOREDUCE_CWND_EXIT_FRXMT = 0x19 + TCP_NOREDUCE_CWND_IN_FRXMT = 0x18 + TCP_NOTENTER_SSTART = 0x17 + TCP_OPT = 0x19 + TCP_RFC1323 = 0x4 + TCP_SETPRIV = 0x27 + TCP_STDURG = 0x10 + TCP_TIMESTAMP_OPTLEN = 0xc + TCP_UNSETPRIV = 0x28 + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETSF = 0x5404 + TCSETSW = 0x5403 + TCXONC = 0x540b + TIOC = 0x5400 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCEXCL = 0x2000740d + TIOCFLUSH = 0x80047410 + TIOCGETC = 0x40067412 + TIOCGETD = 0x40047400 + TIOCGETP = 0x40067408 + TIOCGLTC = 0x40067474 + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047448 + TIOCGSIZE = 0x40087468 + TIOCGWINSZ = 0x40087468 + TIOCHPCL = 0x20007402 + TIOCLBIC = 0x8004747e + TIOCLBIS = 0x8004747f + TIOCLGET = 0x4004747c + TIOCLSET = 0x8004747d + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMIWAIT = 0x80047464 + TIOCMODG = 0x40047403 + TIOCMODS = 0x80047404 + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSDTR = 0x20007479 + TIOCSETC = 0x80067411 + TIOCSETD = 0x80047401 + TIOCSETN = 0x8006740a + TIOCSETP = 0x80067409 + TIOCSLTC = 0x80067475 + 
TIOCSPGRP = 0x80047476 + TIOCSSIZE = 0x80087467 + TIOCSTART = 0x2000746e + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x10000 + UTIME_NOW = -0x2 + UTIME_OMIT = -0x3 + VDISCRD = 0xc + VDSUSP = 0xa + VEOF = 0x4 + VEOL = 0x5 + VEOL2 = 0x6 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xe + VMIN = 0x4 + VQUIT = 0x1 + VREPRINT = 0xb + VSTART = 0x7 + VSTOP = 0x8 + VSTRT = 0x7 + VSUSP = 0x9 + VT0 = 0x0 + VT1 = 0x8000 + VTDELAY = 0x2000 + VTDLY = 0x8000 + VTIME = 0x5 + VWERSE = 0xd + WPARSTART = 0x1 + WPARSTOP = 0x2 + WPARTTYNAME = "Global" + XCASE = 0x4 + XTABS = 0xc00 + _FDATAFLUSH = 0x2000000000 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x43) + EADDRNOTAVAIL = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x42) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x38) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x78) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x75) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x25) + ECLONEME = syscall.Errno(0x52) + ECONNABORTED = syscall.Errno(0x48) + ECONNREFUSED = syscall.Errno(0x4f) + ECONNRESET = syscall.Errno(0x49) + ECORRUPT = syscall.Errno(0x59) + EDEADLK = syscall.Errno(0x2d) + EDESTADDREQ = syscall.Errno(0x3a) + EDESTADDRREQ = syscall.Errno(0x3a) + EDIST = syscall.Errno(0x35) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x58) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFORMAT = syscall.Errno(0x30) + EHOSTDOWN = syscall.Errno(0x50) + EHOSTUNREACH = syscall.Errno(0x51) + EIDRM = syscall.Errno(0x24) + EILSEQ = syscall.Errno(0x74) + EINPROGRESS = syscall.Errno(0x37) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x4b) + EISDIR = syscall.Errno(0x15) + EL2HLT = syscall.Errno(0x2c) + EL2NSYNC = syscall.Errno(0x26) + EL3HLT = syscall.Errno(0x27) + EL3RST = syscall.Errno(0x28) + ELNRNG = syscall.Errno(0x29) + ELOOP = syscall.Errno(0x55) + EMEDIA = syscall.Errno(0x6e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x3b) + EMULTIHOP = syscall.Errno(0x7d) + ENAMETOOLONG = syscall.Errno(0x56) + ENETDOWN = syscall.Errno(0x45) + ENETRESET = syscall.Errno(0x47) + ENETUNREACH = syscall.Errno(0x46) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x70) + ENOBUFS = syscall.Errno(0x4a) + ENOCONNECT = syscall.Errno(0x32) + ENOCSI = syscall.Errno(0x2b) + ENODATA = syscall.Errno(0x7a) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x31) + ENOLINK = syscall.Errno(0x7e) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x23) + ENOPROTOOPT = syscall.Errno(0x3d) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x76) + ENOSTR = syscall.Errno(0x7b) + ENOSYS = syscall.Errno(0x6d) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x4c) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x11) + ENOTREADY = syscall.Errno(0x2e) + ENOTRECOVERABLE = syscall.Errno(0x5e) + ENOTRUST = syscall.Errno(0x72) + ENOTSOCK = syscall.Errno(0x39) + ENOTSUP = syscall.Errno(0x7c) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x40) + EOVERFLOW = syscall.Errno(0x7f) + EOWNERDEAD = syscall.Errno(0x5f) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x41) + EPIPE = syscall.Errno(0x20) + EPROCLIM = 
syscall.Errno(0x53) + EPROTO = syscall.Errno(0x79) + EPROTONOSUPPORT = syscall.Errno(0x3e) + EPROTOTYPE = syscall.Errno(0x3c) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x5d) + ERESTART = syscall.Errno(0x52) + EROFS = syscall.Errno(0x1e) + ESAD = syscall.Errno(0x71) + ESHUTDOWN = syscall.Errno(0x4d) + ESOCKTNOSUPPORT = syscall.Errno(0x3f) + ESOFT = syscall.Errno(0x6f) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x34) + ESYSERROR = syscall.Errno(0x5a) + ETIME = syscall.Errno(0x77) + ETIMEDOUT = syscall.Errno(0x4e) + ETOOMANYREFS = syscall.Errno(0x73) + ETXTBSY = syscall.Errno(0x1a) + EUNATCH = syscall.Errno(0x2a) + EUSERS = syscall.Errno(0x54) + EWOULDBLOCK = syscall.Errno(0xb) + EWRPROTECT = syscall.Errno(0x2f) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGAIO = syscall.Signal(0x17) + SIGALRM = syscall.Signal(0xe) + SIGALRM1 = syscall.Signal(0x26) + SIGBUS = syscall.Signal(0xa) + SIGCAPI = syscall.Signal(0x31) + SIGCHLD = syscall.Signal(0x14) + SIGCLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGCPUFAIL = syscall.Signal(0x3b) + SIGDANGER = syscall.Signal(0x21) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGGRANT = syscall.Signal(0x3c) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOINT = syscall.Signal(0x10) + SIGIOT = syscall.Signal(0x6) + SIGKAP = syscall.Signal(0x3c) + SIGKILL = syscall.Signal(0x9) + SIGLOST = syscall.Signal(0x6) + SIGMAX = syscall.Signal(0x3f) + SIGMAX32 = syscall.Signal(0x3f) + SIGMIGRATE = syscall.Signal(0x23) + SIGMSG = syscall.Signal(0x1b) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x17) + SIGPRE = syscall.Signal(0x24) + SIGPROF = syscall.Signal(0x20) + SIGPTY = syscall.Signal(0x17) + SIGPWR = syscall.Signal(0x1d) + SIGQUIT = syscall.Signal(0x3) + SIGRECONFIG = syscall.Signal(0x3a) + SIGRETRACT = syscall.Signal(0x3d) + SIGSAK = syscall.Signal(0x3f) + SIGSEGV = syscall.Signal(0xb) + SIGSOUND = syscall.Signal(0x3e) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGSYSERROR = syscall.Signal(0x30) + SIGTALRM = syscall.Signal(0x26) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVIRT = syscall.Signal(0x25) + SIGVTALRM = syscall.Signal(0x22) + SIGWAITING = syscall.Signal(0x27) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "not owner"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "I/O error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "arg list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file number"}, + {10, "ECHILD", "no child processes"}, + {11, "EWOULDBLOCK", "resource temporarily unavailable"}, + {12, "ENOMEM", "not enough space"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "ENOTEMPTY", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, 
"ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "file table overflow"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "not a typewriter"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "ENOMSG", "no message of desired type"}, + {36, "EIDRM", "identifier removed"}, + {37, "ECHRNG", "channel number out of range"}, + {38, "EL2NSYNC", "level 2 not synchronized"}, + {39, "EL3HLT", "level 3 halted"}, + {40, "EL3RST", "level 3 reset"}, + {41, "ELNRNG", "link number out of range"}, + {42, "EUNATCH", "protocol driver not attached"}, + {43, "ENOCSI", "no CSI structure available"}, + {44, "EL2HLT", "level 2 halted"}, + {45, "EDEADLK", "deadlock condition if locked"}, + {46, "ENOTREADY", "device not ready"}, + {47, "EWRPROTECT", "write-protected media"}, + {48, "EFORMAT", "unformatted or incompatible media"}, + {49, "ENOLCK", "no locks available"}, + {50, "ENOCONNECT", "cannot Establish Connection"}, + {52, "ESTALE", "missing file or filesystem"}, + {53, "EDIST", "requests blocked by Administrator"}, + {55, "EINPROGRESS", "operation now in progress"}, + {56, "EALREADY", "operation already in progress"}, + {57, "ENOTSOCK", "socket operation on non-socket"}, + {58, "EDESTADDREQ", "destination address required"}, + {59, "EMSGSIZE", "message too long"}, + {60, "EPROTOTYPE", "protocol wrong type for socket"}, + {61, "ENOPROTOOPT", "protocol not available"}, + {62, "EPROTONOSUPPORT", "protocol not supported"}, + {63, "ESOCKTNOSUPPORT", "socket type not supported"}, + {64, "EOPNOTSUPP", "operation not supported on socket"}, + {65, "EPFNOSUPPORT", "protocol family not supported"}, + {66, "EAFNOSUPPORT", "addr family not supported by protocol"}, + {67, "EADDRINUSE", "address already in use"}, + {68, "EADDRNOTAVAIL", "can't assign requested address"}, + {69, "ENETDOWN", "network is down"}, + {70, "ENETUNREACH", "network is unreachable"}, + {71, "ENETRESET", "network dropped connection on reset"}, + {72, "ECONNABORTED", "software caused connection abort"}, + {73, "ECONNRESET", "connection reset by peer"}, + {74, "ENOBUFS", "no buffer space available"}, + {75, "EISCONN", "socket is already connected"}, + {76, "ENOTCONN", "socket is not connected"}, + {77, "ESHUTDOWN", "can't send after socket shutdown"}, + {78, "ETIMEDOUT", "connection timed out"}, + {79, "ECONNREFUSED", "connection refused"}, + {80, "EHOSTDOWN", "host is down"}, + {81, "EHOSTUNREACH", "no route to host"}, + {82, "ERESTART", "restart the system call"}, + {83, "EPROCLIM", "too many processes"}, + {84, "EUSERS", "too many users"}, + {85, "ELOOP", "too many levels of symbolic links"}, + {86, "ENAMETOOLONG", "file name too long"}, + {88, "EDQUOT", "disk quota exceeded"}, + {89, "ECORRUPT", "invalid file system control data detected"}, + {90, "ESYSERROR", "for future use "}, + {93, "EREMOTE", "item is not local to host"}, + {94, "ENOTRECOVERABLE", "state not recoverable "}, + {95, "EOWNERDEAD", "previous owner died "}, + {109, "ENOSYS", "function not implemented"}, + {110, "EMEDIA", "media surface error"}, + {111, "ESOFT", "I/O completed, but needs relocation"}, + {112, "ENOATTR", "no attribute found"}, + {113, "ESAD", "security Authentication Denied"}, + {114, 
"ENOTRUST", "not a Trusted Program"}, + {115, "ETOOMANYREFS", "too many references: can't splice"}, + {116, "EILSEQ", "invalid wide character"}, + {117, "ECANCELED", "asynchronous I/O cancelled"}, + {118, "ENOSR", "out of STREAMS resources"}, + {119, "ETIME", "system call timed out"}, + {120, "EBADMSG", "next message has wrong type"}, + {121, "EPROTO", "error in protocol"}, + {122, "ENODATA", "no message on stream head read q"}, + {123, "ENOSTR", "fd not associated with a stream"}, + {124, "ENOTSUP", "unsupported attribute value"}, + {125, "EMULTIHOP", "multihop is not allowed"}, + {126, "ENOLINK", "the server link has been severed"}, + {127, "EOVERFLOW", "value too large to be stored in data type"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "IOT/Abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "stopped (signal)"}, + {18, "SIGTSTP", "stopped"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible/complete"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {27, "SIGMSG", "input device data"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGPWR", "power-failure"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGPROF", "profiling timer expired"}, + {33, "SIGDANGER", "paging space low"}, + {34, "SIGVTALRM", "virtual timer expired"}, + {35, "SIGMIGRATE", "signal 35"}, + {36, "SIGPRE", "signal 36"}, + {37, "SIGVIRT", "signal 37"}, + {38, "SIGTALRM", "signal 38"}, + {39, "SIGWAITING", "signal 39"}, + {48, "SIGSYSERROR", "signal 48"}, + {49, "SIGCAPI", "signal 49"}, + {58, "SIGRECONFIG", "signal 58"}, + {59, "SIGCPUFAIL", "CPU Failure Predicted"}, + {60, "SIGKAP", "monitor mode granted"}, + {61, "SIGRETRACT", "monitor mode retracted"}, + {62, "SIGSOUND", "sound completed"}, + {63, "SIGSAK", "secure attention"}, +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go new file mode 100644 index 00000000..ed04fd1b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go @@ -0,0 +1,1373 @@ +// mkerrors.sh -maix64 +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build ppc64,aix + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -maix64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_BYPASS = 0x19 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_INTF = 0x14 + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x1e + AF_NDD = 0x17 + AF_NETWARE = 0x16 + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_RIF = 0x15 + AF_ROUTE = 0x11 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ALTWERASE = 0x400000 + ARPHRD_802_3 = 0x6 + ARPHRD_802_5 = 0x6 + ARPHRD_ETHER = 0x1 + ARPHRD_FDDI = 0x1 + B0 = 0x0 + B110 = 0x3 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2400 = 0xb + B300 = 0x7 + B38400 = 0xf + B4800 = 0xc + B50 = 0x1 + B600 = 0x8 + B75 = 0x2 + B9600 = 0xd + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x1000 + BSDLY = 0x1000 + CAP_AACCT = 0x6 + CAP_ARM_APPLICATION = 0x5 + CAP_BYPASS_RAC_VMM = 0x3 + CAP_CLEAR = 0x0 + CAP_CREDENTIALS = 0x7 + CAP_EFFECTIVE = 0x1 + CAP_EWLM_AGENT = 0x4 + CAP_INHERITABLE = 0x2 + CAP_MAXIMUM = 0x7 + CAP_NUMA_ATTACH = 0x2 + CAP_PERMITTED = 0x3 + CAP_PROPAGATE = 0x1 + CAP_PROPOGATE = 0x1 + CAP_SET = 0x1 + CBAUD = 0xf + CFLUSH = 0xf + CIBAUD = 0xf0000 + CLOCAL = 0x800 + CLOCK_MONOTONIC = 0xa + CLOCK_PROCESS_CPUTIME_ID = 0xb + CLOCK_REALTIME = 0x9 + CLOCK_THREAD_CPUTIME_ID = 0xc + CR0 = 0x0 + CR1 = 0x100 + CR2 = 0x200 + CR3 = 0x300 + CRDLY = 0x300 + CREAD = 0x80 + CS5 = 0x0 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIOCGIFCONF = -0x3fef96dc + CSIZE = 0x30 + CSMAP_DIR = "/usr/lib/nls/csmap/" + CSTART = '\021' + CSTOP = '\023' + CSTOPB = 0x40 + CSUSP = 0x1a + ECHO = 0x8 + ECHOCTL = 0x20000 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x80000 + ECHONL = 0x40 + ECHOPRT = 0x40000 + ECH_ICMPID = 0x2 + ETHERNET_CSMACD = 0x6 + EVENP = 0x80 + EXCONTINUE = 0x0 + EXDLOK = 0x3 + EXIO = 0x2 + EXPGIO = 0x0 + EXRESUME = 0x2 + EXRETURN = 0x1 + EXSIG = 0x4 + EXTA = 0xe + EXTB = 0xf + EXTRAP = 0x1 + EYEC_RTENTRYA = 0x257274656e747241 + EYEC_RTENTRYF = 0x257274656e747246 + E_ACC = 0x0 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0xfffe + FF0 = 0x0 + FF1 = 0x2000 + FFDLY = 0x2000 + FLUSHBAND = 0x40 + FLUSHLOW = 0x8 + FLUSHO = 0x100000 + FLUSHR = 0x1 + FLUSHRW = 0x3 + FLUSHW = 0x2 + F_CLOSEM = 0xa + F_DUP2FD = 0xe + F_DUPFD = 0x0 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0xb + F_GETLK64 = 0xb + F_GETOWN = 0x8 + F_LOCK = 0x1 + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0xc + F_SETLK64 = 0xc + F_SETLKW = 0xd + F_SETLKW64 = 0xd + F_SETOWN = 0x9 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_TSTLK = 0xf + F_ULOCK = 0x0 + F_UNLCK = 0x3 + F_WRLCK = 0x2 + HUPCL = 0x400 + IBSHIFT = 0x10 + ICANON = 0x2 + ICMP6_FILTER = 0x26 + ICMP6_SEC_SEND_DEL = 0x46 + ICMP6_SEC_SEND_GET = 0x47 + ICMP6_SEC_SEND_SET = 0x44 + ICMP6_SEC_SEND_SET_CGA_ADDR = 0x45 + ICRNL = 0x100 + IEXTEN = 0x200000 + IFA_FIRSTALIAS = 0x2000 + IFA_ROUTE = 0x1 + IFF_64BIT = 0x4000000 + IFF_ALLCAST = 0x20000 + IFF_ALLMULTI = 0x200 + IFF_BPF = 0x8000000 + IFF_BRIDGE = 0x40000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x80c52 + IFF_CHECKSUM_OFFLOAD = 0x10000000 + IFF_D1 = 0x8000 + IFF_D2 = 0x4000 + IFF_D3 = 0x2000 + IFF_D4 = 0x1000 + IFF_DEBUG = 0x4 + IFF_DEVHEALTH = 0x4000 + IFF_DO_HW_LOOPBACK = 0x10000 + IFF_GROUP_ROUTING = 0x2000000 + IFF_IFBUFMGT = 0x800000 + IFF_LINK0 = 0x100000 + IFF_LINK1 = 0x200000 + IFF_LINK2 = 0x400000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 
0x80000 + IFF_NOARP = 0x80 + IFF_NOECHO = 0x800 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_PSEG = 0x40000000 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_SNAP = 0x8000 + IFF_TCP_DISABLE_CKSUM = 0x20000000 + IFF_TCP_NOCKSUM = 0x1000000 + IFF_UP = 0x1 + IFF_VIPA = 0x80000000 + IFNAMSIZ = 0x10 + IFO_FLUSH = 0x1 + IFT_1822 = 0x2 + IFT_AAL5 = 0x31 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ATM = 0x25 + IFT_CEPT = 0x13 + IFT_CLUSTER = 0x3e + IFT_DS3 = 0x1e + IFT_EON = 0x19 + IFT_ETHER = 0x6 + IFT_FCS = 0x3a + IFT_FDDI = 0xf + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_GIFTUNNEL = 0x3c + IFT_HDH1822 = 0x3 + IFT_HF = 0x3d + IFT_HIPPI = 0x2f + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IB = 0xc7 + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88026 = 0xa + IFT_LAPB = 0x10 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_NSIP = 0x1b + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PPP = 0x17 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PTPSERIAL = 0x16 + IFT_RS232 = 0x21 + IFT_SDLC = 0x11 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SN = 0x38 + IFT_SONET = 0x27 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SP = 0x39 + IFT_STARLAN = 0xb + IFT_T1 = 0x12 + IFT_TUNNEL = 0x3b + IFT_ULTRA = 0x1d + IFT_V35 = 0x2d + IFT_VIPA = 0x37 + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x10000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_USE = 0x1 + IPPROTO_AH = 0x33 + IPPROTO_BIP = 0x53 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GIF = 0x8c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_LOCAL = 0x3f + IPPROTO_MAX = 0x100 + IPPROTO_MH = 0x87 + IPPROTO_NONE = 0x3b + IPPROTO_PUP = 0xc + IPPROTO_QOS = 0x2d + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPV6_ADDRFORM = 0x16 + IPV6_ADDR_PREFERENCES = 0x4a + IPV6_ADD_MEMBERSHIP = 0xc + IPV6_AIXRAWSOCKET = 0x39 + IPV6_CHECKSUM = 0x27 + IPV6_DONTFRAG = 0x2d + IPV6_DROP_MEMBERSHIP = 0xd + IPV6_DSTOPTS = 0x36 + IPV6_FLOWINFO_FLOWLABEL = 0xffffff + IPV6_FLOWINFO_PRIFLOW = 0xfffffff + IPV6_FLOWINFO_PRIORITY = 0xf000000 + IPV6_FLOWINFO_SRFLAG = 0x10000000 + IPV6_FLOWINFO_VERSION = 0xf0000000 + IPV6_HOPLIMIT = 0x28 + IPV6_HOPOPTS = 0x34 + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MIPDSTOPTS = 0x36 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_NOPROBE = 0x1c + IPV6_PATHMTU = 0x2e + IPV6_PKTINFO = 0x21 + IPV6_PKTOPTIONS = 0x24 + IPV6_PRIORITY_10 = 0xa000000 + IPV6_PRIORITY_11 = 0xb000000 + IPV6_PRIORITY_12 = 
0xc000000 + IPV6_PRIORITY_13 = 0xd000000 + IPV6_PRIORITY_14 = 0xe000000 + IPV6_PRIORITY_15 = 0xf000000 + IPV6_PRIORITY_8 = 0x8000000 + IPV6_PRIORITY_9 = 0x9000000 + IPV6_PRIORITY_BULK = 0x4000000 + IPV6_PRIORITY_CONTROL = 0x7000000 + IPV6_PRIORITY_FILLER = 0x1000000 + IPV6_PRIORITY_INTERACTIVE = 0x6000000 + IPV6_PRIORITY_RESERVED1 = 0x3000000 + IPV6_PRIORITY_RESERVED2 = 0x5000000 + IPV6_PRIORITY_UNATTENDED = 0x2000000 + IPV6_PRIORITY_UNCHARACTERIZED = 0x0 + IPV6_RECVDSTOPTS = 0x38 + IPV6_RECVHOPLIMIT = 0x29 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVHOPS = 0x22 + IPV6_RECVIF = 0x1e + IPV6_RECVPATHMTU = 0x2f + IPV6_RECVPKTINFO = 0x23 + IPV6_RECVRTHDR = 0x33 + IPV6_RECVSRCRT = 0x1d + IPV6_RECVTCLASS = 0x2a + IPV6_RTHDR = 0x32 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RTHDR_TYPE_2 = 0x2 + IPV6_SENDIF = 0x1f + IPV6_SRFLAG_LOOSE = 0x0 + IPV6_SRFLAG_STRICT = 0x10000000 + IPV6_TCLASS = 0x2b + IPV6_TOKEN_LENGTH = 0x40 + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2c + IPV6_V6ONLY = 0x25 + IPV6_VERSION = 0x60000000 + IP_ADDRFORM = 0x16 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x3c + IP_BLOCK_SOURCE = 0x3a + IP_BROADCAST_IF = 0x10 + IP_CACHE_LINE_SIZE = 0x80 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DHCPMODE = 0x11 + IP_DONTFRAG = 0x19 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x3d + IP_FINDPMTU = 0x1a + IP_HDRINCL = 0x2 + IP_INC_MEMBERSHIPS = 0x14 + IP_INIT_MEMBERSHIP = 0x14 + IP_MAXPACKET = 0xffff + IP_MF = 0x2000 + IP_MSS = 0x240 + IP_MULTICAST_HOPS = 0xa + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OPT = 0x1b + IP_OPTIONS = 0x1 + IP_PMTUAGE = 0x1b + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVIFINFO = 0xf + IP_RECVINTERFACE = 0x20 + IP_RECVMACHDR = 0xe + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVTTL = 0x22 + IP_RETOPTS = 0x8 + IP_SOURCE_FILTER = 0x48 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x3b + IP_UNICAST_HOPS = 0x4 + ISIG = 0x1 + ISTRIP = 0x20 + IUCLC = 0x800 + IXANY = 0x1000 + IXOFF = 0x400 + IXON = 0x200 + I_FLUSH = 0x20005305 + LNOFLSH = 0x8000 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ANON = 0x10 + MAP_ANONYMOUS = 0x10 + MAP_FILE = 0x0 + MAP_FIXED = 0x100 + MAP_PRIVATE = 0x2 + MAP_SHARED = 0x1 + MAP_TYPE = 0xf0 + MAP_VARIABLE = 0x0 + MCL_CURRENT = 0x100 + MCL_FUTURE = 0x200 + MSG_ANY = 0x4 + MSG_ARGEXT = 0x400 + MSG_BAND = 0x2 + MSG_COMPAT = 0x8000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_EOR = 0x8 + MSG_HIPRI = 0x1 + MSG_MAXIOVLEN = 0x10 + MSG_MPEG2 = 0x80 + MSG_NONBLOCK = 0x4000 + MSG_NOSIGNAL = 0x100 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x200 + MS_ASYNC = 0x10 + MS_EINTR = 0x80 + MS_INVALIDATE = 0x40 + MS_PER_SEC = 0x3e8 + MS_SYNC = 0x20 + NL0 = 0x0 + NL1 = 0x4000 + NL2 = 0x8000 + NL3 = 0xc000 + NLDLY = 0x4000 + NOFLSH = 0x80 + NOFLUSH = 0x80000000 + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x2 + ONLCR = 0x4 + ONLRET = 0x20 + ONOCR = 0x10 + ONOEOT = 0x80000 + OPOST = 0x1 + OXTABS = 0x40000 + O_ACCMODE = 0x23 + O_APPEND = 0x8 + O_CIO = 0x80 + O_CIOR = 0x800000000 + O_CLOEXEC = 0x800000 + O_CREAT = 0x100 + O_DEFER = 0x2000 + O_DELAY = 0x4000 + O_DIRECT = 0x8000000 + O_DIRECTORY = 0x80000 + O_DSYNC = 0x400000 + O_EFSOFF = 0x400000000 + O_EFSON = 0x200000000 + O_EXCL = 0x400 + O_EXEC = 0x20 + O_LARGEFILE = 
0x4000000 + O_NDELAY = 0x8000 + O_NOCACHE = 0x100000 + O_NOCTTY = 0x800 + O_NOFOLLOW = 0x1000000 + O_NONBLOCK = 0x4 + O_NONE = 0x3 + O_NSHARE = 0x10000 + O_RAW = 0x100000000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSHARE = 0x1000 + O_RSYNC = 0x200000 + O_SEARCH = 0x20 + O_SNAPSHOT = 0x40 + O_SYNC = 0x10 + O_TRUNC = 0x200 + O_TTY_INIT = 0x0 + O_WRONLY = 0x1 + PARENB = 0x100 + PAREXT = 0x100000 + PARMRK = 0x8 + PARODD = 0x200 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_64BIT = 0x20 + PR_ADDR = 0x2 + PR_ARGEXT = 0x400 + PR_ATOMIC = 0x1 + PR_CONNREQUIRED = 0x4 + PR_FASTHZ = 0x5 + PR_INP = 0x40 + PR_INTRLEVEL = 0x8000 + PR_MLS = 0x100 + PR_MLS_1_LABEL = 0x200 + PR_NOEOR = 0x4000 + PR_RIGHTS = 0x10 + PR_SLOWHZ = 0x2 + PR_WANTRCVD = 0x8 + RLIMIT_AS = 0x6 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x9 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DOWNSTREAM = 0x100 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTC_IA64 = 0x3 + RTC_POWER = 0x1 + RTC_POWER_PC = 0x2 + RTF_ACTIVE_DGD = 0x1000000 + RTF_BCE = 0x80000 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_BUL = 0x2000 + RTF_CLONE = 0x10000 + RTF_CLONED = 0x20000 + RTF_CLONING = 0x100 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FREE_IN_PROG = 0x4000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MASK = 0x80 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_PERMANENT6 = 0x8000000 + RTF_PINNED = 0x100000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_SMALLMTU = 0x40000 + RTF_STATIC = 0x800 + RTF_STOPSRCH = 0x2000000 + RTF_UNREACHABLE = 0x10000000 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_EXPIRE = 0xf + RTM_GET = 0x4 + RTM_GETNEXT = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTLOST = 0x10 + RTM_RTTUNIT = 0xf4240 + RTM_SAMEADDR = 0x12 + RTM_SET = 0x13 + RTM_VERSION = 0x2 + RTM_VERSION_GR = 0x4 + RTM_VERSION_GR_COMPAT = 0x3 + RTM_VERSION_POLICY = 0x5 + RTM_VERSION_POLICY_EXT = 0x6 + RTM_VERSION_POLICY_PRFN = 0x7 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_RIGHTS = 0x1 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIGMAX64 = 0xff + SIGQUEUE_MAX = 0x20 + SIOCADDIFVIPA = 0x20006942 + SIOCADDMTU = -0x7ffb9690 + SIOCADDMULTI = -0x7fdf96cf + SIOCADDNETID = -0x7fd796a9 + SIOCADDRT = -0x7fc78df6 + SIOCAIFADDR = -0x7fbf96e6 + SIOCATMARK = 0x40047307 + SIOCDARP = -0x7fb396e0 + SIOCDELIFVIPA = 0x20006943 + SIOCDELMTU = -0x7ffb968f + SIOCDELMULTI = -0x7fdf96ce + SIOCDELPMTU = -0x7fd78ff6 + SIOCDELRT = -0x7fc78df5 + SIOCDIFADDR = -0x7fd796e7 + SIOCDNETOPT = -0x3ffe9680 + SIOCDX25XLATE = -0x7fd7969b + SIOCFIFADDR = -0x7fdf966d + SIOCGARP = -0x3fb396da + SIOCGETMTUS = 0x2000696f + 
SIOCGETSGCNT = -0x3feb8acc + SIOCGETVIFCNT = -0x3feb8acd + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = -0x3fd796df + SIOCGIFADDRS = 0x2000698c + SIOCGIFBAUDRATE = -0x3fd79693 + SIOCGIFBRDADDR = -0x3fd796dd + SIOCGIFCONF = -0x3fef96bb + SIOCGIFCONFGLOB = -0x3fef9670 + SIOCGIFDSTADDR = -0x3fd796de + SIOCGIFFLAGS = -0x3fd796ef + SIOCGIFGIDLIST = 0x20006968 + SIOCGIFHWADDR = -0x3fab966b + SIOCGIFMETRIC = -0x3fd796e9 + SIOCGIFMTU = -0x3fd796aa + SIOCGIFNETMASK = -0x3fd796db + SIOCGIFOPTIONS = -0x3fd796d6 + SIOCGISNO = -0x3fd79695 + SIOCGLOADF = -0x3ffb967e + SIOCGLOWAT = 0x40047303 + SIOCGNETOPT = -0x3ffe96a5 + SIOCGNETOPT1 = -0x3fdf967f + SIOCGNMTUS = 0x2000696e + SIOCGPGRP = 0x40047309 + SIOCGSIZIFCONF = 0x4004696a + SIOCGSRCFILTER = -0x3fe796cb + SIOCGTUNEPHASE = -0x3ffb9676 + SIOCGX25XLATE = -0x3fd7969c + SIOCIFATTACH = -0x7fdf9699 + SIOCIFDETACH = -0x7fdf969a + SIOCIFGETPKEY = -0x7fdf969b + SIOCIF_ATM_DARP = -0x7fdf9683 + SIOCIF_ATM_DUMPARP = -0x7fdf9685 + SIOCIF_ATM_GARP = -0x7fdf9682 + SIOCIF_ATM_IDLE = -0x7fdf9686 + SIOCIF_ATM_SARP = -0x7fdf9681 + SIOCIF_ATM_SNMPARP = -0x7fdf9687 + SIOCIF_ATM_SVC = -0x7fdf9684 + SIOCIF_ATM_UBR = -0x7fdf9688 + SIOCIF_DEVHEALTH = -0x7ffb966c + SIOCIF_IB_ARP_INCOMP = -0x7fdf9677 + SIOCIF_IB_ARP_TIMER = -0x7fdf9678 + SIOCIF_IB_CLEAR_PINFO = -0x3fdf966f + SIOCIF_IB_DEL_ARP = -0x7fdf967f + SIOCIF_IB_DEL_PINFO = -0x3fdf9670 + SIOCIF_IB_DUMP_ARP = -0x7fdf9680 + SIOCIF_IB_GET_ARP = -0x7fdf967e + SIOCIF_IB_GET_INFO = -0x3f879675 + SIOCIF_IB_GET_STATS = -0x3f879672 + SIOCIF_IB_NOTIFY_ADDR_REM = -0x3f87966a + SIOCIF_IB_RESET_STATS = -0x3f879671 + SIOCIF_IB_RESIZE_CQ = -0x7fdf9679 + SIOCIF_IB_SET_ARP = -0x7fdf967d + SIOCIF_IB_SET_PKEY = -0x7fdf967c + SIOCIF_IB_SET_PORT = -0x7fdf967b + SIOCIF_IB_SET_QKEY = -0x7fdf9676 + SIOCIF_IB_SET_QSIZE = -0x7fdf967a + SIOCLISTIFVIPA = 0x20006944 + SIOCSARP = -0x7fb396e2 + SIOCSHIWAT = 0xffffffff80047300 + SIOCSIFADDR = -0x7fd796f4 + SIOCSIFADDRORI = -0x7fdb9673 + SIOCSIFBRDADDR = -0x7fd796ed + SIOCSIFDSTADDR = -0x7fd796f2 + SIOCSIFFLAGS = -0x7fd796f0 + SIOCSIFGIDLIST = 0x20006969 + SIOCSIFMETRIC = -0x7fd796e8 + SIOCSIFMTU = -0x7fd796a8 + SIOCSIFNETDUMP = -0x7fd796e4 + SIOCSIFNETMASK = -0x7fd796ea + SIOCSIFOPTIONS = -0x7fd796d7 + SIOCSIFSUBCHAN = -0x7fd796e5 + SIOCSISNO = -0x7fd79694 + SIOCSLOADF = -0x3ffb967d + SIOCSLOWAT = 0xffffffff80047302 + SIOCSNETOPT = -0x7ffe96a6 + SIOCSPGRP = 0xffffffff80047308 + SIOCSX25XLATE = -0x7fd7969d + SOCK_CONN_DGRAM = 0x6 + SOCK_DGRAM = 0x2 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x400 + SO_ACCEPTCONN = 0x2 + SO_AUDIT = 0x8000 + SO_BROADCAST = 0x20 + SO_CKSUMRECV = 0x800 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_KERNACCEPT = 0x2000 + SO_LINGER = 0x80 + SO_NOMULTIPATH = 0x4000 + SO_NOREUSEADDR = 0x1000 + SO_OOBINLINE = 0x100 + SO_PEERID = 0x1009 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMPNS = 0x100a + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_USE_IFBUFS = 0x400 + S_BANDURG = 0x400 + S_EMODFMT = 0x3c000000 + S_ENFMT = 0x400 + S_ERROR = 0x100 + S_HANGUP = 0x200 + S_HIPRI = 0x2 + S_ICRYPTO = 0x80000 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFJOURNAL = 0x10000 + S_IFLNK = 0xa000 + S_IFMPX = 0x2200 + S_IFMT = 0xf000 + S_IFPDIR = 0x4000000 + S_IFPSDIR = 0x8000000 + S_IFPSSDIR = 0xc000000 
+ S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFSYSEA = 0x30000000 + S_INPUT = 0x1 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_ITCB = 0x1000000 + S_ITP = 0x800000 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXACL = 0x2000000 + S_IXATTR = 0x40000 + S_IXGRP = 0x8 + S_IXINTERFACE = 0x100000 + S_IXMOD = 0x40000000 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + S_MSG = 0x8 + S_OUTPUT = 0x4 + S_RDBAND = 0x20 + S_RDNORM = 0x10 + S_RESERVED1 = 0x20000 + S_RESERVED2 = 0x200000 + S_RESERVED3 = 0x400000 + S_RESERVED4 = 0x80000000 + S_RESFMT1 = 0x10000000 + S_RESFMT10 = 0x34000000 + S_RESFMT11 = 0x38000000 + S_RESFMT12 = 0x3c000000 + S_RESFMT2 = 0x14000000 + S_RESFMT3 = 0x18000000 + S_RESFMT4 = 0x1c000000 + S_RESFMT5 = 0x20000000 + S_RESFMT6 = 0x24000000 + S_RESFMT7 = 0x28000000 + S_RESFMT8 = 0x2c000000 + S_WRBAND = 0x80 + S_WRNORM = 0x40 + TAB0 = 0x0 + TAB1 = 0x400 + TAB2 = 0x800 + TAB3 = 0xc00 + TABDLY = 0xc00 + TCFLSH = 0x540c + TCGETA = 0x5405 + TCGETS = 0x5401 + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_24DAYS_WORTH_OF_SLOWTICKS = 0x3f4800 + TCP_ACLADD = 0x23 + TCP_ACLBIND = 0x26 + TCP_ACLCLEAR = 0x22 + TCP_ACLDEL = 0x24 + TCP_ACLDENY = 0x8 + TCP_ACLFLUSH = 0x21 + TCP_ACLGID = 0x1 + TCP_ACLLS = 0x25 + TCP_ACLSUBNET = 0x4 + TCP_ACLUID = 0x2 + TCP_CWND_DF = 0x16 + TCP_CWND_IF = 0x15 + TCP_DELAY_ACK_FIN = 0x2 + TCP_DELAY_ACK_SYN = 0x1 + TCP_FASTNAME = 0x101080a + TCP_KEEPCNT = 0x13 + TCP_KEEPIDLE = 0x11 + TCP_KEEPINTVL = 0x12 + TCP_LSPRIV = 0x29 + TCP_LUID = 0x20 + TCP_MAXBURST = 0x8 + TCP_MAXDF = 0x64 + TCP_MAXIF = 0x64 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAXWINDOWSCALE = 0xe + TCP_MAX_SACK = 0x4 + TCP_MSS = 0x5b4 + TCP_NODELAY = 0x1 + TCP_NODELAYACK = 0x14 + TCP_NOREDUCE_CWND_EXIT_FRXMT = 0x19 + TCP_NOREDUCE_CWND_IN_FRXMT = 0x18 + TCP_NOTENTER_SSTART = 0x17 + TCP_OPT = 0x19 + TCP_RFC1323 = 0x4 + TCP_SETPRIV = 0x27 + TCP_STDURG = 0x10 + TCP_TIMESTAMP_OPTLEN = 0xc + TCP_UNSETPRIV = 0x28 + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETSF = 0x5404 + TCSETSW = 0x5403 + TCXONC = 0x540b + TIOC = 0x5400 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0xffffffff80047462 + TIOCEXCL = 0x2000740d + TIOCFLUSH = 0xffffffff80047410 + TIOCGETC = 0x40067412 + TIOCGETD = 0x40047400 + TIOCGETP = 0x40067408 + TIOCGLTC = 0x40067474 + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047448 + TIOCGSIZE = 0x40087468 + TIOCGWINSZ = 0x40087468 + TIOCHPCL = 0x20007402 + TIOCLBIC = 0xffffffff8004747e + TIOCLBIS = 0xffffffff8004747f + TIOCLGET = 0x4004747c + TIOCLSET = 0xffffffff8004747d + TIOCMBIC = 0xffffffff8004746b + TIOCMBIS = 0xffffffff8004746c + TIOCMGET = 0x4004746a + TIOCMIWAIT = 0xffffffff80047464 + TIOCMODG = 0x40047403 + TIOCMODS = 0xffffffff80047404 + TIOCMSET = 0xffffffff8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0xffffffff80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0xffffffff80047469 + TIOCSBRK = 0x2000747b + TIOCSDTR = 
0x20007479 + TIOCSETC = 0xffffffff80067411 + TIOCSETD = 0xffffffff80047401 + TIOCSETN = 0xffffffff8006740a + TIOCSETP = 0xffffffff80067409 + TIOCSLTC = 0xffffffff80067475 + TIOCSPGRP = 0xffffffff80047476 + TIOCSSIZE = 0xffffffff80087467 + TIOCSTART = 0x2000746e + TIOCSTI = 0xffffffff80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0xffffffff80087467 + TIOCUCNTL = 0xffffffff80047466 + TOSTOP = 0x10000 + UTIME_NOW = -0x2 + UTIME_OMIT = -0x3 + VDISCRD = 0xc + VDSUSP = 0xa + VEOF = 0x4 + VEOL = 0x5 + VEOL2 = 0x6 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xe + VMIN = 0x4 + VQUIT = 0x1 + VREPRINT = 0xb + VSTART = 0x7 + VSTOP = 0x8 + VSTRT = 0x7 + VSUSP = 0x9 + VT0 = 0x0 + VT1 = 0x8000 + VTDELAY = 0x2000 + VTDLY = 0x8000 + VTIME = 0x5 + VWERSE = 0xd + WPARSTART = 0x1 + WPARSTOP = 0x2 + WPARTTYNAME = "Global" + XCASE = 0x4 + XTABS = 0xc00 + _FDATAFLUSH = 0x2000000000 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x43) + EADDRNOTAVAIL = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x42) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x38) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x78) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x75) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x25) + ECLONEME = syscall.Errno(0x52) + ECONNABORTED = syscall.Errno(0x48) + ECONNREFUSED = syscall.Errno(0x4f) + ECONNRESET = syscall.Errno(0x49) + ECORRUPT = syscall.Errno(0x59) + EDEADLK = syscall.Errno(0x2d) + EDESTADDREQ = syscall.Errno(0x3a) + EDESTADDRREQ = syscall.Errno(0x3a) + EDIST = syscall.Errno(0x35) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x58) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFORMAT = syscall.Errno(0x30) + EHOSTDOWN = syscall.Errno(0x50) + EHOSTUNREACH = syscall.Errno(0x51) + EIDRM = syscall.Errno(0x24) + EILSEQ = syscall.Errno(0x74) + EINPROGRESS = syscall.Errno(0x37) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x4b) + EISDIR = syscall.Errno(0x15) + EL2HLT = syscall.Errno(0x2c) + EL2NSYNC = syscall.Errno(0x26) + EL3HLT = syscall.Errno(0x27) + EL3RST = syscall.Errno(0x28) + ELNRNG = syscall.Errno(0x29) + ELOOP = syscall.Errno(0x55) + EMEDIA = syscall.Errno(0x6e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x3b) + EMULTIHOP = syscall.Errno(0x7d) + ENAMETOOLONG = syscall.Errno(0x56) + ENETDOWN = syscall.Errno(0x45) + ENETRESET = syscall.Errno(0x47) + ENETUNREACH = syscall.Errno(0x46) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x70) + ENOBUFS = syscall.Errno(0x4a) + ENOCONNECT = syscall.Errno(0x32) + ENOCSI = syscall.Errno(0x2b) + ENODATA = syscall.Errno(0x7a) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x31) + ENOLINK = syscall.Errno(0x7e) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x23) + ENOPROTOOPT = syscall.Errno(0x3d) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x76) + ENOSTR = syscall.Errno(0x7b) + ENOSYS = syscall.Errno(0x6d) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x4c) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x11) + ENOTREADY = syscall.Errno(0x2e) + ENOTRECOVERABLE = syscall.Errno(0x5e) + ENOTRUST = syscall.Errno(0x72) + ENOTSOCK = syscall.Errno(0x39) + ENOTSUP = syscall.Errno(0x7c) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) 
+ EOPNOTSUPP = syscall.Errno(0x40) + EOVERFLOW = syscall.Errno(0x7f) + EOWNERDEAD = syscall.Errno(0x5f) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x41) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x53) + EPROTO = syscall.Errno(0x79) + EPROTONOSUPPORT = syscall.Errno(0x3e) + EPROTOTYPE = syscall.Errno(0x3c) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x5d) + ERESTART = syscall.Errno(0x52) + EROFS = syscall.Errno(0x1e) + ESAD = syscall.Errno(0x71) + ESHUTDOWN = syscall.Errno(0x4d) + ESOCKTNOSUPPORT = syscall.Errno(0x3f) + ESOFT = syscall.Errno(0x6f) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x34) + ESYSERROR = syscall.Errno(0x5a) + ETIME = syscall.Errno(0x77) + ETIMEDOUT = syscall.Errno(0x4e) + ETOOMANYREFS = syscall.Errno(0x73) + ETXTBSY = syscall.Errno(0x1a) + EUNATCH = syscall.Errno(0x2a) + EUSERS = syscall.Errno(0x54) + EWOULDBLOCK = syscall.Errno(0xb) + EWRPROTECT = syscall.Errno(0x2f) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGAIO = syscall.Signal(0x17) + SIGALRM = syscall.Signal(0xe) + SIGALRM1 = syscall.Signal(0x26) + SIGBUS = syscall.Signal(0xa) + SIGCAPI = syscall.Signal(0x31) + SIGCHLD = syscall.Signal(0x14) + SIGCLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGCPUFAIL = syscall.Signal(0x3b) + SIGDANGER = syscall.Signal(0x21) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGGRANT = syscall.Signal(0x3c) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOINT = syscall.Signal(0x10) + SIGIOT = syscall.Signal(0x6) + SIGKAP = syscall.Signal(0x3c) + SIGKILL = syscall.Signal(0x9) + SIGLOST = syscall.Signal(0x6) + SIGMAX = syscall.Signal(0xff) + SIGMAX32 = syscall.Signal(0x3f) + SIGMIGRATE = syscall.Signal(0x23) + SIGMSG = syscall.Signal(0x1b) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x17) + SIGPRE = syscall.Signal(0x24) + SIGPROF = syscall.Signal(0x20) + SIGPTY = syscall.Signal(0x17) + SIGPWR = syscall.Signal(0x1d) + SIGQUIT = syscall.Signal(0x3) + SIGRECONFIG = syscall.Signal(0x3a) + SIGRETRACT = syscall.Signal(0x3d) + SIGSAK = syscall.Signal(0x3f) + SIGSEGV = syscall.Signal(0xb) + SIGSOUND = syscall.Signal(0x3e) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGSYSERROR = syscall.Signal(0x30) + SIGTALRM = syscall.Signal(0x26) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVIRT = syscall.Signal(0x25) + SIGVTALRM = syscall.Signal(0x22) + SIGWAITING = syscall.Signal(0x27) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "not owner"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "I/O error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "arg list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file number"}, + {10, "ECHILD", "no child processes"}, + {11, "EWOULDBLOCK", "resource temporarily unavailable"}, + {12, "ENOMEM", "not enough space"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", 
"bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "ENOTEMPTY", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "file table overflow"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "not a typewriter"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "ENOMSG", "no message of desired type"}, + {36, "EIDRM", "identifier removed"}, + {37, "ECHRNG", "channel number out of range"}, + {38, "EL2NSYNC", "level 2 not synchronized"}, + {39, "EL3HLT", "level 3 halted"}, + {40, "EL3RST", "level 3 reset"}, + {41, "ELNRNG", "link number out of range"}, + {42, "EUNATCH", "protocol driver not attached"}, + {43, "ENOCSI", "no CSI structure available"}, + {44, "EL2HLT", "level 2 halted"}, + {45, "EDEADLK", "deadlock condition if locked"}, + {46, "ENOTREADY", "device not ready"}, + {47, "EWRPROTECT", "write-protected media"}, + {48, "EFORMAT", "unformatted or incompatible media"}, + {49, "ENOLCK", "no locks available"}, + {50, "ENOCONNECT", "cannot Establish Connection"}, + {52, "ESTALE", "missing file or filesystem"}, + {53, "EDIST", "requests blocked by Administrator"}, + {55, "EINPROGRESS", "operation now in progress"}, + {56, "EALREADY", "operation already in progress"}, + {57, "ENOTSOCK", "socket operation on non-socket"}, + {58, "EDESTADDREQ", "destination address required"}, + {59, "EMSGSIZE", "message too long"}, + {60, "EPROTOTYPE", "protocol wrong type for socket"}, + {61, "ENOPROTOOPT", "protocol not available"}, + {62, "EPROTONOSUPPORT", "protocol not supported"}, + {63, "ESOCKTNOSUPPORT", "socket type not supported"}, + {64, "EOPNOTSUPP", "operation not supported on socket"}, + {65, "EPFNOSUPPORT", "protocol family not supported"}, + {66, "EAFNOSUPPORT", "addr family not supported by protocol"}, + {67, "EADDRINUSE", "address already in use"}, + {68, "EADDRNOTAVAIL", "can't assign requested address"}, + {69, "ENETDOWN", "network is down"}, + {70, "ENETUNREACH", "network is unreachable"}, + {71, "ENETRESET", "network dropped connection on reset"}, + {72, "ECONNABORTED", "software caused connection abort"}, + {73, "ECONNRESET", "connection reset by peer"}, + {74, "ENOBUFS", "no buffer space available"}, + {75, "EISCONN", "socket is already connected"}, + {76, "ENOTCONN", "socket is not connected"}, + {77, "ESHUTDOWN", "can't send after socket shutdown"}, + {78, "ETIMEDOUT", "connection timed out"}, + {79, "ECONNREFUSED", "connection refused"}, + {80, "EHOSTDOWN", "host is down"}, + {81, "EHOSTUNREACH", "no route to host"}, + {82, "ERESTART", "restart the system call"}, + {83, "EPROCLIM", "too many processes"}, + {84, "EUSERS", "too many users"}, + {85, "ELOOP", "too many levels of symbolic links"}, + {86, "ENAMETOOLONG", "file name too long"}, + {88, "EDQUOT", "disk quota exceeded"}, + {89, "ECORRUPT", "invalid file system control data detected"}, + {90, "ESYSERROR", "for future use "}, + {93, "EREMOTE", "item is not local to host"}, + {94, "ENOTRECOVERABLE", "state not recoverable "}, + {95, "EOWNERDEAD", "previous owner died "}, + {109, "ENOSYS", "function not implemented"}, 
+ {110, "EMEDIA", "media surface error"}, + {111, "ESOFT", "I/O completed, but needs relocation"}, + {112, "ENOATTR", "no attribute found"}, + {113, "ESAD", "security Authentication Denied"}, + {114, "ENOTRUST", "not a Trusted Program"}, + {115, "ETOOMANYREFS", "too many references: can't splice"}, + {116, "EILSEQ", "invalid wide character"}, + {117, "ECANCELED", "asynchronous I/O cancelled"}, + {118, "ENOSR", "out of STREAMS resources"}, + {119, "ETIME", "system call timed out"}, + {120, "EBADMSG", "next message has wrong type"}, + {121, "EPROTO", "error in protocol"}, + {122, "ENODATA", "no message on stream head read q"}, + {123, "ENOSTR", "fd not associated with a stream"}, + {124, "ENOTSUP", "unsupported attribute value"}, + {125, "EMULTIHOP", "multihop is not allowed"}, + {126, "ENOLINK", "the server link has been severed"}, + {127, "EOVERFLOW", "value too large to be stored in data type"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "IOT/Abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "stopped (signal)"}, + {18, "SIGTSTP", "stopped"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible/complete"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {27, "SIGMSG", "input device data"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGPWR", "power-failure"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGPROF", "profiling timer expired"}, + {33, "SIGDANGER", "paging space low"}, + {34, "SIGVTALRM", "virtual timer expired"}, + {35, "SIGMIGRATE", "signal 35"}, + {36, "SIGPRE", "signal 36"}, + {37, "SIGVIRT", "signal 37"}, + {38, "SIGTALRM", "signal 38"}, + {39, "SIGWAITING", "signal 39"}, + {48, "SIGSYSERROR", "signal 48"}, + {49, "SIGCAPI", "signal 49"}, + {58, "SIGRECONFIG", "signal 58"}, + {59, "SIGCPUFAIL", "CPU Failure Predicted"}, + {60, "SIGGRANT", "monitor mode granted"}, + {61, "SIGRETRACT", "monitor mode retracted"}, + {62, "SIGSOUND", "sound completed"}, + {63, "SIGMAX32", "secure attention"}, + {255, "SIGMAX", "signal 255"}, +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index 46a082b6..bbe6089b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -3,7 +3,7 @@ // +build amd64,dragonfly -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
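The errorList and signalList tables added in these generated files are plain lookup tables keyed by number, apparently there to back name/description helpers in the unix package. As a rough illustration of how a table with this exact shape can be consulted, here is a minimal, self-contained sketch; the local table and the errnoName function below are illustrative stand-ins, not the vendored code.

// lookup_sketch.go — illustrative only; mirrors the shape of the generated
// errorList table, it is not part of the vendored package.
package main

import (
	"fmt"
	"syscall"
)

// A tiny stand-in table with the same layout as the generated errorList.
var demoErrors = [...]struct {
	num  syscall.Errno
	name string
	desc string
}{
	{1, "EPERM", "not owner"},
	{2, "ENOENT", "no such file or directory"},
	{3, "ESRCH", "no such process"},
}

// errnoName scans the table for the entry matching e and returns its symbolic
// name. The generated tables are sorted by number, so a binary search would
// work just as well; a linear scan keeps the sketch short.
func errnoName(e syscall.Errno) string {
	for _, ent := range demoErrors {
		if ent.num == e {
			return ent.name
		}
	}
	return ""
}

func main() {
	fmt.Println(errnoName(syscall.Errno(2)), "->", demoErrors[1].desc)
	// Output: ENOENT -> no such file or directory
}
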
// cgo -godefs -- -m64 _const.go package unix @@ -880,6 +880,40 @@ const ( MAP_VPAGETABLE = 0x2000 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x20 + MNT_CMDFLAGS = 0xf0000 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_EXKERB = 0x800 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXPUBLIC = 0x20000000 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_IGNORE = 0x800000 + MNT_LAZY = 0x4 + MNT_LOCAL = 0x1000 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + MNT_NOCLUSTERW = 0x80000000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOSUID = 0x8 + MNT_NOSYMFOLLOW = 0x400000 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SOFTDEP = 0x200000 + MNT_SUIDDIR = 0x100000 + MNT_SYNCHRONOUS = 0x2 + MNT_TRIM = 0x1000000 + MNT_UPDATE = 0x10000 + MNT_USER = 0x8000 + MNT_VISFLAGMASK = 0xf1f0ffff + MNT_WAIT = 0x1 MSG_CMSG_CLOEXEC = 0x1000 MSG_CTRUNC = 0x20 MSG_DONTROUTE = 0x4 @@ -1168,6 +1202,36 @@ const ( SO_TIMESTAMP = 0x400 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDB = 0x9000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 TCIFLUSH = 0x1 TCIOFF = 0x3 TCIOFLUSH = 0x3 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 2947dc03..d2bbaabc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -1345,6 +1345,35 @@ const ( SO_USELOOPBACK = 0x40 SO_USER_COOKIE = 0x1015 SO_VENDOR = 0x80000000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 TAB0 = 0x0 TAB3 = 0x4 TABDLY = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index c600d012..4f8db783 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -1346,6 +1346,35 @@ const ( SO_USELOOPBACK = 0x40 SO_USER_COOKIE = 0x1015 SO_VENDOR = 0x80000000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 TAB0 = 0x0 TAB3 = 0x4 TABDLY = 0x4 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index e8240d23..53e5de60 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -1354,6 +1354,35 @@ const ( SO_USELOOPBACK = 0x40 SO_USER_COOKIE = 0x1015 SO_VENDOR = 0x80000000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 TAB0 = 0x0 TAB3 = 0x4 TABDLY = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index ee17d4bd..fe564160 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -3,7 +3,7 @@ // +build 386,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 _const.go package unix @@ -64,6 +64,7 @@ const ( AF_VSOCK = 0x28 AF_WANPIPE = 0x19 AF_X25 = 0x9 + AF_XDP = 0x2c ALG_OP_DECRYPT = 0x0 ALG_OP_ENCRYPT = 0x1 ALG_SET_AEAD_ASSOCLEN = 0x4 @@ -449,6 +450,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -498,6 +500,8 @@ const ( FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 + FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 @@ -635,7 +639,7 @@ const ( IFA_F_STABLE_PRIVACY = 0x800 IFA_F_TEMPORARY = 0x1 IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0x8 + IFA_MAX = 0x9 IFF_ALLMULTI = 0x200 IFF_ATTACH_QUEUE = 0x200 IFF_AUTOMEDIA = 0x4000 @@ -762,6 +766,7 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b + IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 IPV6_HOPOPTS = 0x36 @@ -955,6 +960,7 @@ const ( MAP_EXECUTABLE = 0x1000 MAP_FILE = 0x0 MAP_FIXED = 0x10 + MAP_FIXED_NOREPLACE = 0x100000 MAP_GROWSDOWN = 0x100 MAP_HUGETLB = 0x40000 MAP_HUGE_MASK = 0x3f @@ -965,11 +971,28 @@ const ( MAP_POPULATE = 0x8000 MAP_PRIVATE = 0x2 MAP_SHARED = 0x1 + MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 MAP_TYPE = 0xf MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MFD_ALLOW_SEALING = 0x2 + MFD_CLOEXEC = 0x1 + MFD_HUGETLB = 0x4 + MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16MB = 0x60000000 + MFD_HUGE_1GB = 0x78000000 + MFD_HUGE_1MB = 0x50000000 + MFD_HUGE_256MB = 0x70000000 + MFD_HUGE_2GB = 0x7c000000 + MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_64KB = 0x40000000 + MFD_HUGE_8MB = 0x5c000000 + MFD_HUGE_MASK = 0x3f + MFD_HUGE_SHIFT = 0x1a MINIX2_SUPER_MAGIC = 0x2468 MINIX2_SUPER_MAGIC2 = 0x2478 MINIX3_SUPER_MAGIC = 0x4d5a @@ -1075,6 +1098,8 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NETNSA_MAX = 0x3 + NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY 
= 0x3 @@ -1234,6 +1259,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x2401 PERF_EVENT_IOC_ENABLE = 0x2400 PERF_EVENT_IOC_ID = 0x80042407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4004240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 PERF_EVENT_IOC_PERIOD = 0x40082404 PERF_EVENT_IOC_QUERY_BPF = 0xc004240a @@ -1289,6 +1315,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1334,11 +1361,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1417,6 +1451,9 @@ const ( RAMFS_MAGIC = 0x858458f6 RDTGROUP_SUPER_MAGIC = 0x7655821 REISERFS_SUPER_MAGIC = 0x52654973 + RENAME_EXCHANGE = 0x2 + RENAME_NOREPLACE = 0x1 + RENAME_WHITEOUT = 0x4 RLIMIT_AS = 0x9 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1459,13 +1496,40 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x1d RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8004700d + RTC_EPOCH_SET = 0x4004700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x8004700b + RTC_IRQP_SET = 0x4004700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x801c7011 + RTC_PLL_SET = 0x401c7012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 @@ -1568,17 +1632,22 @@ const ( RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a + RTPROT_BGP = 0xba RTPROT_BIRD = 0xc RTPROT_BOOT = 0x3 RTPROT_DHCP = 0x10 RTPROT_DNROUTED = 0xd + RTPROT_EIGRP = 0xc0 RTPROT_GATED = 0x8 + RTPROT_ISIS = 0xbb RTPROT_KERNEL = 0x2 RTPROT_MROUTED = 0x11 RTPROT_MRT = 0xa RTPROT_NTK = 0xf + RTPROT_OSPF = 0xbc RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 + RTPROT_RIP = 0xbd RTPROT_STATIC = 0x4 RTPROT_UNSPEC = 0x0 RTPROT_XORP = 0xe @@ -1743,6 +1812,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_X25 = 0x106 + SOL_XDP = 0x11b SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x1e SO_ATTACH_BPF = 0x32 @@ -1838,6 +1908,9 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SYNC_FILE_RANGE_WAIT_AFTER = 0x4 + SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 + SYNC_FILE_RANGE_WRITE = 0x2 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -1901,6 +1974,8 @@ const ( TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 TCP_FASTOPEN_CONNECT = 0x1e + TCP_FASTOPEN_KEY = 0x21 + TCP_FASTOPEN_NO_COOKIE = 0x22 TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -2068,6 +2143,21 @@ const ( TUNSETVNETBE = 0x400454de TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc + UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 + UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 
+ UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 @@ -2205,6 +2295,26 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4 + XDP_COPY = 0x2 + XDP_FLAGS_DRV_MODE = 0x4 + XDP_FLAGS_HW_MODE = 0x8 + XDP_FLAGS_MASK = 0xf + XDP_FLAGS_MODES = 0xe + XDP_FLAGS_SKB_MODE = 0x2 + XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 + XDP_MMAP_OFFSETS = 0x1 + XDP_PGOFF_RX_RING = 0x0 + XDP_PGOFF_TX_RING = 0x80000000 + XDP_RX_RING = 0x2 + XDP_SHARED_UMEM = 0x1 + XDP_STATISTICS = 0x7 + XDP_TX_RING = 0x3 + XDP_UMEM_COMPLETION_RING = 0x6 + XDP_UMEM_FILL_RING = 0x5 + XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 + XDP_UMEM_PGOFF_FILL_RING = 0x100000000 + XDP_UMEM_REG = 0x4 + XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0x1800 ZSMALLOC_MAGIC = 0x58295829 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 64ab9f40..dcfa6674 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -3,7 +3,7 @@ // +build amd64,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 _const.go package unix @@ -64,6 +64,7 @@ const ( AF_VSOCK = 0x28 AF_WANPIPE = 0x19 AF_X25 = 0x9 + AF_XDP = 0x2c ALG_OP_DECRYPT = 0x0 ALG_OP_ENCRYPT = 0x1 ALG_SET_AEAD_ASSOCLEN = 0x4 @@ -449,6 +450,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -498,6 +500,8 @@ const ( FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 + FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 @@ -635,7 +639,7 @@ const ( IFA_F_STABLE_PRIVACY = 0x800 IFA_F_TEMPORARY = 0x1 IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0x8 + IFA_MAX = 0x9 IFF_ALLMULTI = 0x200 IFF_ATTACH_QUEUE = 0x200 IFF_AUTOMEDIA = 0x4000 @@ -762,6 +766,7 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b + IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 IPV6_HOPOPTS = 0x36 @@ -955,6 +960,7 @@ const ( MAP_EXECUTABLE = 0x1000 MAP_FILE = 0x0 MAP_FIXED = 0x10 + MAP_FIXED_NOREPLACE = 0x100000 MAP_GROWSDOWN = 0x100 MAP_HUGETLB = 0x40000 MAP_HUGE_MASK = 0x3f @@ -965,11 +971,28 @@ const ( MAP_POPULATE = 0x8000 MAP_PRIVATE = 0x2 MAP_SHARED = 0x1 + MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 MAP_TYPE = 0xf MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MFD_ALLOW_SEALING = 0x2 + MFD_CLOEXEC = 0x1 + MFD_HUGETLB = 0x4 + MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16MB = 0x60000000 + MFD_HUGE_1GB = 0x78000000 + MFD_HUGE_1MB = 0x50000000 + MFD_HUGE_256MB = 0x70000000 + MFD_HUGE_2GB = 0x7c000000 + MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_64KB = 0x40000000 + MFD_HUGE_8MB = 0x5c000000 + MFD_HUGE_MASK = 0x3f + MFD_HUGE_SHIFT = 0x1a MINIX2_SUPER_MAGIC = 0x2468 MINIX2_SUPER_MAGIC2 = 0x2478 MINIX3_SUPER_MAGIC = 0x4d5a @@ -1075,6 +1098,8 @@ const ( NETLINK_UNUSED 
= 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NETNSA_MAX = 0x3 + NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1234,6 +1259,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x2401 PERF_EVENT_IOC_ENABLE = 0x2400 PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 PERF_EVENT_IOC_PERIOD = 0x40082404 PERF_EVENT_IOC_QUERY_BPF = 0xc008240a @@ -1289,6 +1315,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1334,11 +1361,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1418,6 +1452,9 @@ const ( RAMFS_MAGIC = 0x858458f6 RDTGROUP_SUPER_MAGIC = 0x7655821 REISERFS_SUPER_MAGIC = 0x52654973 + RENAME_EXCHANGE = 0x2 + RENAME_NOREPLACE = 0x1 + RENAME_WHITEOUT = 0x4 RLIMIT_AS = 0x9 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1460,13 +1497,40 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x1d RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8008700d + RTC_EPOCH_SET = 0x4008700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x8008700b + RTC_IRQP_SET = 0x4008700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x80207011 + RTC_PLL_SET = 0x40207012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 @@ -1569,17 +1633,22 @@ const ( RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a + RTPROT_BGP = 0xba RTPROT_BIRD = 0xc RTPROT_BOOT = 0x3 RTPROT_DHCP = 0x10 RTPROT_DNROUTED = 0xd + RTPROT_EIGRP = 0xc0 RTPROT_GATED = 0x8 + RTPROT_ISIS = 0xbb RTPROT_KERNEL = 0x2 RTPROT_MROUTED = 0x11 RTPROT_MRT = 0xa RTPROT_NTK = 0xf + RTPROT_OSPF = 0xbc RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 + RTPROT_RIP = 0xbd RTPROT_STATIC = 0x4 RTPROT_UNSPEC = 0x0 RTPROT_XORP = 0xe @@ -1744,6 +1813,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_X25 = 0x106 + SOL_XDP = 0x11b SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x1e SO_ATTACH_BPF = 0x32 @@ -1839,6 +1909,9 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SYNC_FILE_RANGE_WAIT_AFTER = 0x4 + SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 + SYNC_FILE_RANGE_WRITE = 0x2 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -1902,6 +1975,8 @@ const ( TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 TCP_FASTOPEN_CONNECT = 0x1e + TCP_FASTOPEN_KEY = 0x21 + TCP_FASTOPEN_NO_COOKIE = 0x22 TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -2069,6 +2144,21 @@ const ( TUNSETVNETBE = 0x400454de 
TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc + UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 + UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 + UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 @@ -2205,6 +2295,26 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4 + XDP_COPY = 0x2 + XDP_FLAGS_DRV_MODE = 0x4 + XDP_FLAGS_HW_MODE = 0x8 + XDP_FLAGS_MASK = 0xf + XDP_FLAGS_MODES = 0xe + XDP_FLAGS_SKB_MODE = 0x2 + XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 + XDP_MMAP_OFFSETS = 0x1 + XDP_PGOFF_RX_RING = 0x0 + XDP_PGOFF_TX_RING = 0x80000000 + XDP_RX_RING = 0x2 + XDP_SHARED_UMEM = 0x1 + XDP_STATISTICS = 0x7 + XDP_TX_RING = 0x3 + XDP_UMEM_COMPLETION_RING = 0x6 + XDP_UMEM_FILL_RING = 0x5 + XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 + XDP_UMEM_PGOFF_FILL_RING = 0x100000000 + XDP_UMEM_REG = 0x4 + XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0x1800 ZSMALLOC_MAGIC = 0x58295829 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 6ae0ac6f..c2ef50c6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -3,7 +3,7 @@ // +build arm,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -64,6 +64,7 @@ const ( AF_VSOCK = 0x28 AF_WANPIPE = 0x19 AF_X25 = 0x9 + AF_XDP = 0x2c ALG_OP_DECRYPT = 0x0 ALG_OP_ENCRYPT = 0x1 ALG_SET_AEAD_ASSOCLEN = 0x4 @@ -449,6 +450,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -497,6 +499,8 @@ const ( FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 + FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 @@ -634,7 +638,7 @@ const ( IFA_F_STABLE_PRIVACY = 0x800 IFA_F_TEMPORARY = 0x1 IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0x8 + IFA_MAX = 0x9 IFF_ALLMULTI = 0x200 IFF_ATTACH_QUEUE = 0x200 IFF_AUTOMEDIA = 0x4000 @@ -761,6 +765,7 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b + IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 IPV6_HOPOPTS = 0x36 @@ -953,6 +958,7 @@ const ( MAP_EXECUTABLE = 0x1000 MAP_FILE = 0x0 MAP_FIXED = 0x10 + MAP_FIXED_NOREPLACE = 0x100000 MAP_GROWSDOWN = 0x100 MAP_HUGETLB = 0x40000 MAP_HUGE_MASK = 0x3f @@ -963,11 +969,28 @@ const ( MAP_POPULATE = 0x8000 MAP_PRIVATE = 0x2 MAP_SHARED = 0x1 + MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 MAP_TYPE = 0xf MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MFD_ALLOW_SEALING = 0x2 + MFD_CLOEXEC = 0x1 + MFD_HUGETLB = 0x4 + MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16MB = 0x60000000 + MFD_HUGE_1GB = 0x78000000 + MFD_HUGE_1MB = 0x50000000 + MFD_HUGE_256MB = 0x70000000 + MFD_HUGE_2GB = 0x7c000000 + MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_64KB = 0x40000000 + MFD_HUGE_8MB = 0x5c000000 + 
MFD_HUGE_MASK = 0x3f + MFD_HUGE_SHIFT = 0x1a MINIX2_SUPER_MAGIC = 0x2468 MINIX2_SUPER_MAGIC2 = 0x2478 MINIX3_SUPER_MAGIC = 0x4d5a @@ -1073,6 +1096,8 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NETNSA_MAX = 0x3 + NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1232,6 +1257,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x2401 PERF_EVENT_IOC_ENABLE = 0x2400 PERF_EVENT_IOC_ID = 0x80042407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4004240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 PERF_EVENT_IOC_PERIOD = 0x40082404 PERF_EVENT_IOC_QUERY_BPF = 0xc004240a @@ -1287,6 +1313,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1332,11 +1359,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1424,6 +1458,9 @@ const ( RAMFS_MAGIC = 0x858458f6 RDTGROUP_SUPER_MAGIC = 0x7655821 REISERFS_SUPER_MAGIC = 0x52654973 + RENAME_EXCHANGE = 0x2 + RENAME_NOREPLACE = 0x1 + RENAME_WHITEOUT = 0x4 RLIMIT_AS = 0x9 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1466,13 +1503,40 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x1d RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8004700d + RTC_EPOCH_SET = 0x4004700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x8004700b + RTC_IRQP_SET = 0x4004700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x801c7011 + RTC_PLL_SET = 0x401c7012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 @@ -1575,17 +1639,22 @@ const ( RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a + RTPROT_BGP = 0xba RTPROT_BIRD = 0xc RTPROT_BOOT = 0x3 RTPROT_DHCP = 0x10 RTPROT_DNROUTED = 0xd + RTPROT_EIGRP = 0xc0 RTPROT_GATED = 0x8 + RTPROT_ISIS = 0xbb RTPROT_KERNEL = 0x2 RTPROT_MROUTED = 0x11 RTPROT_MRT = 0xa RTPROT_NTK = 0xf + RTPROT_OSPF = 0xbc RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 + RTPROT_RIP = 0xbd RTPROT_STATIC = 0x4 RTPROT_UNSPEC = 0x0 RTPROT_XORP = 0xe @@ -1750,6 +1819,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_X25 = 0x106 + SOL_XDP = 0x11b SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x1e SO_ATTACH_BPF = 0x32 @@ -1845,6 +1915,9 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SYNC_FILE_RANGE_WAIT_AFTER = 0x4 + SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 + SYNC_FILE_RANGE_WRITE = 0x2 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -1908,6 +1981,8 @@ const ( TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 TCP_FASTOPEN_CONNECT = 0x1e + 
TCP_FASTOPEN_KEY = 0x21 + TCP_FASTOPEN_NO_COOKIE = 0x22 TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -2075,6 +2150,21 @@ const ( TUNSETVNETBE = 0x400454de TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc + UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 + UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 + UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 @@ -2211,6 +2301,26 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4 + XDP_COPY = 0x2 + XDP_FLAGS_DRV_MODE = 0x4 + XDP_FLAGS_HW_MODE = 0x8 + XDP_FLAGS_MASK = 0xf + XDP_FLAGS_MODES = 0xe + XDP_FLAGS_SKB_MODE = 0x2 + XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 + XDP_MMAP_OFFSETS = 0x1 + XDP_PGOFF_RX_RING = 0x0 + XDP_PGOFF_TX_RING = 0x80000000 + XDP_RX_RING = 0x2 + XDP_SHARED_UMEM = 0x1 + XDP_STATISTICS = 0x7 + XDP_TX_RING = 0x3 + XDP_UMEM_COMPLETION_RING = 0x6 + XDP_UMEM_FILL_RING = 0x5 + XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 + XDP_UMEM_PGOFF_FILL_RING = 0x100000000 + XDP_UMEM_REG = 0x4 + XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0x1800 ZSMALLOC_MAGIC = 0x58295829 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index f58450bf..1a820d66 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -3,7 +3,7 @@ // +build arm64,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char _const.go package unix @@ -64,6 +64,7 @@ const ( AF_VSOCK = 0x28 AF_WANPIPE = 0x19 AF_X25 = 0x9 + AF_XDP = 0x2c ALG_OP_DECRYPT = 0x0 ALG_OP_ENCRYPT = 0x1 ALG_SET_AEAD_ASSOCLEN = 0x4 @@ -450,6 +451,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -500,6 +502,8 @@ const ( FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 + FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 @@ -637,7 +641,7 @@ const ( IFA_F_STABLE_PRIVACY = 0x800 IFA_F_TEMPORARY = 0x1 IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0x8 + IFA_MAX = 0x9 IFF_ALLMULTI = 0x200 IFF_ATTACH_QUEUE = 0x200 IFF_AUTOMEDIA = 0x4000 @@ -764,6 +768,7 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b + IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 IPV6_HOPOPTS = 0x36 @@ -956,6 +961,7 @@ const ( MAP_EXECUTABLE = 0x1000 MAP_FILE = 0x0 MAP_FIXED = 0x10 + MAP_FIXED_NOREPLACE = 0x100000 MAP_GROWSDOWN = 0x100 MAP_HUGETLB = 0x40000 MAP_HUGE_MASK = 0x3f @@ -966,11 +972,28 @@ const ( MAP_POPULATE = 0x8000 MAP_PRIVATE = 0x2 MAP_SHARED = 0x1 + MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 MAP_TYPE = 0xf MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MFD_ALLOW_SEALING = 0x2 + MFD_CLOEXEC = 0x1 + MFD_HUGETLB = 0x4 + MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16MB = 0x60000000 + MFD_HUGE_1GB = 0x78000000 + MFD_HUGE_1MB = 0x50000000 + MFD_HUGE_256MB = 0x70000000 + MFD_HUGE_2GB = 0x7c000000 + MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_64KB = 0x40000000 + MFD_HUGE_8MB = 0x5c000000 + MFD_HUGE_MASK = 0x3f + MFD_HUGE_SHIFT = 0x1a MINIX2_SUPER_MAGIC = 0x2468 MINIX2_SUPER_MAGIC2 = 0x2478 MINIX3_SUPER_MAGIC = 0x4d5a @@ -1076,6 +1099,8 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NETNSA_MAX = 0x3 + NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1235,6 +1260,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x2401 PERF_EVENT_IOC_ENABLE = 0x2400 PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 PERF_EVENT_IOC_PERIOD = 0x40082404 PERF_EVENT_IOC_QUERY_BPF = 0xc008240a @@ -1290,6 +1316,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1335,11 +1362,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1408,6 +1442,9 @@ const ( RAMFS_MAGIC = 0x858458f6 RDTGROUP_SUPER_MAGIC = 0x7655821 REISERFS_SUPER_MAGIC = 0x52654973 + RENAME_EXCHANGE = 0x2 + RENAME_NOREPLACE = 0x1 + RENAME_WHITEOUT = 0x4 RLIMIT_AS = 0x9 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1450,13 +1487,40 @@ const ( RTAX_UNSPEC = 
0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x1d RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8008700d + RTC_EPOCH_SET = 0x4008700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x8008700b + RTC_IRQP_SET = 0x4008700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x80207011 + RTC_PLL_SET = 0x40207012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 @@ -1559,17 +1623,22 @@ const ( RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a + RTPROT_BGP = 0xba RTPROT_BIRD = 0xc RTPROT_BOOT = 0x3 RTPROT_DHCP = 0x10 RTPROT_DNROUTED = 0xd + RTPROT_EIGRP = 0xc0 RTPROT_GATED = 0x8 + RTPROT_ISIS = 0xbb RTPROT_KERNEL = 0x2 RTPROT_MROUTED = 0x11 RTPROT_MRT = 0xa RTPROT_NTK = 0xf + RTPROT_OSPF = 0xbc RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 + RTPROT_RIP = 0xbd RTPROT_STATIC = 0x4 RTPROT_UNSPEC = 0x0 RTPROT_XORP = 0xe @@ -1734,6 +1803,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_X25 = 0x106 + SOL_XDP = 0x11b SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x1e SO_ATTACH_BPF = 0x32 @@ -1830,6 +1900,9 @@ const ( STATX_UID = 0x8 STATX__RESERVED = 0x80000000 SVE_MAGIC = 0x53564501 + SYNC_FILE_RANGE_WAIT_AFTER = 0x4 + SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 + SYNC_FILE_RANGE_WRITE = 0x2 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -1893,6 +1966,8 @@ const ( TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 TCP_FASTOPEN_CONNECT = 0x1e + TCP_FASTOPEN_KEY = 0x21 + TCP_FASTOPEN_NO_COOKIE = 0x22 TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -2060,6 +2135,21 @@ const ( TUNSETVNETBE = 0x400454de TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc + UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 + UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 + UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 @@ -2196,6 +2286,26 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4 + XDP_COPY = 0x2 + XDP_FLAGS_DRV_MODE = 0x4 + XDP_FLAGS_HW_MODE = 0x8 + XDP_FLAGS_MASK = 0xf + XDP_FLAGS_MODES = 0xe + XDP_FLAGS_SKB_MODE = 0x2 + XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 + XDP_MMAP_OFFSETS = 0x1 + XDP_PGOFF_RX_RING = 0x0 + XDP_PGOFF_TX_RING = 0x80000000 + XDP_RX_RING = 0x2 + XDP_SHARED_UMEM = 0x1 + XDP_STATISTICS = 0x7 + XDP_TX_RING = 0x3 + XDP_UMEM_COMPLETION_RING = 0x6 + XDP_UMEM_FILL_RING = 0x5 + XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 + XDP_UMEM_PGOFF_FILL_RING = 0x100000000 + XDP_UMEM_REG = 0x4 + XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0x1800 ZSMALLOC_MAGIC = 0x58295829 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 465ff2f0..b515b2a6 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -3,7 +3,7 @@ // +build mips,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -64,6 +64,7 @@ const ( AF_VSOCK = 0x28 AF_WANPIPE = 0x19 AF_X25 = 0x9 + AF_XDP = 0x2c ALG_OP_DECRYPT = 0x0 ALG_OP_ENCRYPT = 0x1 ALG_SET_AEAD_ASSOCLEN = 0x4 @@ -449,6 +450,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -497,6 +499,8 @@ const ( FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 + FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 @@ -634,7 +638,7 @@ const ( IFA_F_STABLE_PRIVACY = 0x800 IFA_F_TEMPORARY = 0x1 IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0x8 + IFA_MAX = 0x9 IFF_ALLMULTI = 0x200 IFF_ATTACH_QUEUE = 0x200 IFF_AUTOMEDIA = 0x4000 @@ -761,6 +765,7 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b + IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 IPV6_HOPOPTS = 0x36 @@ -953,6 +958,7 @@ const ( MAP_EXECUTABLE = 0x4000 MAP_FILE = 0x0 MAP_FIXED = 0x10 + MAP_FIXED_NOREPLACE = 0x100000 MAP_GROWSDOWN = 0x1000 MAP_HUGETLB = 0x80000 MAP_HUGE_MASK = 0x3f @@ -964,11 +970,27 @@ const ( MAP_PRIVATE = 0x2 MAP_RENAME = 0x800 MAP_SHARED = 0x1 + MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x40000 MAP_TYPE = 0xf MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MFD_ALLOW_SEALING = 0x2 + MFD_CLOEXEC = 0x1 + MFD_HUGETLB = 0x4 + MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16MB = 0x60000000 + MFD_HUGE_1GB = 0x78000000 + MFD_HUGE_1MB = 0x50000000 + MFD_HUGE_256MB = 0x70000000 + MFD_HUGE_2GB = 0x7c000000 + MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_64KB = 0x40000000 + MFD_HUGE_8MB = 0x5c000000 + MFD_HUGE_MASK = 0x3f + MFD_HUGE_SHIFT = 0x1a MINIX2_SUPER_MAGIC = 0x2468 MINIX2_SUPER_MAGIC2 = 0x2478 MINIX3_SUPER_MAGIC = 0x4d5a @@ -1074,6 +1096,8 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NETNSA_MAX = 0x3 + NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1233,6 +1257,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x20002401 PERF_EVENT_IOC_ENABLE = 0x20002400 PERF_EVENT_IOC_ID = 0x40042407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8004240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 PERF_EVENT_IOC_PERIOD = 0x80082404 PERF_EVENT_IOC_QUERY_BPF = 0xc004240a @@ -1288,6 +1313,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1333,11 +1359,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1418,6 +1451,9 @@ const ( RAMFS_MAGIC = 0x858458f6 RDTGROUP_SUPER_MAGIC = 0x7655821 
REISERFS_SUPER_MAGIC = 0x52654973 + RENAME_EXCHANGE = 0x2 + RENAME_NOREPLACE = 0x1 + RENAME_WHITEOUT = 0x4 RLIMIT_AS = 0x6 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1460,13 +1496,40 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x1d RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4004700d + RTC_EPOCH_SET = 0x8004700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x4004700b + RTC_IRQP_SET = 0x8004700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x401c7011 + RTC_PLL_SET = 0x801c7012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 @@ -1569,17 +1632,22 @@ const ( RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a + RTPROT_BGP = 0xba RTPROT_BIRD = 0xc RTPROT_BOOT = 0x3 RTPROT_DHCP = 0x10 RTPROT_DNROUTED = 0xd + RTPROT_EIGRP = 0xc0 RTPROT_GATED = 0x8 + RTPROT_ISIS = 0xbb RTPROT_KERNEL = 0x2 RTPROT_MROUTED = 0x11 RTPROT_MRT = 0xa RTPROT_NTK = 0xf + RTPROT_OSPF = 0xbc RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 + RTPROT_RIP = 0xbd RTPROT_STATIC = 0x4 RTPROT_UNSPEC = 0x0 RTPROT_XORP = 0xe @@ -1744,6 +1812,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_X25 = 0x106 + SOL_XDP = 0x11b SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x1009 SO_ATTACH_BPF = 0x32 @@ -1840,6 +1909,9 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SYNC_FILE_RANGE_WAIT_AFTER = 0x4 + SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 + SYNC_FILE_RANGE_WRITE = 0x2 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -1902,6 +1974,8 @@ const ( TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 TCP_FASTOPEN_CONNECT = 0x1e + TCP_FASTOPEN_KEY = 0x21 + TCP_FASTOPEN_NO_COOKIE = 0x22 TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -2071,6 +2145,21 @@ const ( TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc + UBI_IOCATT = 0x80186f40 + UBI_IOCDET = 0x80046f41 + UBI_IOCEBCH = 0x80044f02 + UBI_IOCEBER = 0x80044f01 + UBI_IOCEBISMAP = 0x40044f05 + UBI_IOCEBMAP = 0x80084f03 + UBI_IOCEBUNMAP = 0x80044f04 + UBI_IOCMKVOL = 0x80986f00 + UBI_IOCRMVOL = 0x80046f01 + UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRSVOL = 0x800c6f02 + UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCVOLCRBLK = 0x80804f07 + UBI_IOCVOLRMBLK = 0x20004f08 + UBI_IOCVOLUP = 0x80084f00 UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 @@ -2208,6 +2297,26 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4 + XDP_COPY = 0x2 + XDP_FLAGS_DRV_MODE = 0x4 + XDP_FLAGS_HW_MODE = 0x8 + XDP_FLAGS_MASK = 0xf + XDP_FLAGS_MODES = 0xe + XDP_FLAGS_SKB_MODE = 0x2 + XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 + XDP_MMAP_OFFSETS = 0x1 + XDP_PGOFF_RX_RING = 0x0 + XDP_PGOFF_TX_RING = 0x80000000 + XDP_RX_RING = 0x2 + XDP_SHARED_UMEM = 0x1 + XDP_STATISTICS = 0x7 + XDP_TX_RING = 0x3 + XDP_UMEM_COMPLETION_RING = 0x6 + XDP_UMEM_FILL_RING = 0x5 + XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 + XDP_UMEM_PGOFF_FILL_RING = 0x100000000 + XDP_UMEM_REG = 0x4 + XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 
0x1800 ZSMALLOC_MAGIC = 0x58295829 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 37e851aa..29a88f0e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -3,7 +3,7 @@ // +build mips64,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -64,6 +64,7 @@ const ( AF_VSOCK = 0x28 AF_WANPIPE = 0x19 AF_X25 = 0x9 + AF_XDP = 0x2c ALG_OP_DECRYPT = 0x0 ALG_OP_ENCRYPT = 0x1 ALG_SET_AEAD_ASSOCLEN = 0x4 @@ -449,6 +450,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -497,6 +499,8 @@ const ( FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 + FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 @@ -634,7 +638,7 @@ const ( IFA_F_STABLE_PRIVACY = 0x800 IFA_F_TEMPORARY = 0x1 IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0x8 + IFA_MAX = 0x9 IFF_ALLMULTI = 0x200 IFF_ATTACH_QUEUE = 0x200 IFF_AUTOMEDIA = 0x4000 @@ -761,6 +765,7 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b + IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 IPV6_HOPOPTS = 0x36 @@ -953,6 +958,7 @@ const ( MAP_EXECUTABLE = 0x4000 MAP_FILE = 0x0 MAP_FIXED = 0x10 + MAP_FIXED_NOREPLACE = 0x100000 MAP_GROWSDOWN = 0x1000 MAP_HUGETLB = 0x80000 MAP_HUGE_MASK = 0x3f @@ -964,11 +970,27 @@ const ( MAP_PRIVATE = 0x2 MAP_RENAME = 0x800 MAP_SHARED = 0x1 + MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x40000 MAP_TYPE = 0xf MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MFD_ALLOW_SEALING = 0x2 + MFD_CLOEXEC = 0x1 + MFD_HUGETLB = 0x4 + MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16MB = 0x60000000 + MFD_HUGE_1GB = 0x78000000 + MFD_HUGE_1MB = 0x50000000 + MFD_HUGE_256MB = 0x70000000 + MFD_HUGE_2GB = 0x7c000000 + MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_64KB = 0x40000000 + MFD_HUGE_8MB = 0x5c000000 + MFD_HUGE_MASK = 0x3f + MFD_HUGE_SHIFT = 0x1a MINIX2_SUPER_MAGIC = 0x2468 MINIX2_SUPER_MAGIC2 = 0x2478 MINIX3_SUPER_MAGIC = 0x4d5a @@ -1074,6 +1096,8 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NETNSA_MAX = 0x3 + NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1233,6 +1257,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x20002401 PERF_EVENT_IOC_ENABLE = 0x20002400 PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 PERF_EVENT_IOC_PERIOD = 0x80082404 PERF_EVENT_IOC_QUERY_BPF = 0xc008240a @@ -1288,6 +1313,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1333,11 +1359,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + 
PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1418,6 +1451,9 @@ const ( RAMFS_MAGIC = 0x858458f6 RDTGROUP_SUPER_MAGIC = 0x7655821 REISERFS_SUPER_MAGIC = 0x52654973 + RENAME_EXCHANGE = 0x2 + RENAME_NOREPLACE = 0x1 + RENAME_WHITEOUT = 0x4 RLIMIT_AS = 0x6 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1460,13 +1496,40 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x1d RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4008700d + RTC_EPOCH_SET = 0x8008700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x4008700b + RTC_IRQP_SET = 0x8008700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x40207011 + RTC_PLL_SET = 0x80207012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 @@ -1569,17 +1632,22 @@ const ( RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a + RTPROT_BGP = 0xba RTPROT_BIRD = 0xc RTPROT_BOOT = 0x3 RTPROT_DHCP = 0x10 RTPROT_DNROUTED = 0xd + RTPROT_EIGRP = 0xc0 RTPROT_GATED = 0x8 + RTPROT_ISIS = 0xbb RTPROT_KERNEL = 0x2 RTPROT_MROUTED = 0x11 RTPROT_MRT = 0xa RTPROT_NTK = 0xf + RTPROT_OSPF = 0xbc RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 + RTPROT_RIP = 0xbd RTPROT_STATIC = 0x4 RTPROT_UNSPEC = 0x0 RTPROT_XORP = 0xe @@ -1744,6 +1812,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_X25 = 0x106 + SOL_XDP = 0x11b SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x1009 SO_ATTACH_BPF = 0x32 @@ -1840,6 +1909,9 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SYNC_FILE_RANGE_WAIT_AFTER = 0x4 + SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 + SYNC_FILE_RANGE_WRITE = 0x2 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -1902,6 +1974,8 @@ const ( TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 TCP_FASTOPEN_CONNECT = 0x1e + TCP_FASTOPEN_KEY = 0x21 + TCP_FASTOPEN_NO_COOKIE = 0x22 TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -2071,6 +2145,21 @@ const ( TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc + UBI_IOCATT = 0x80186f40 + UBI_IOCDET = 0x80046f41 + UBI_IOCEBCH = 0x80044f02 + UBI_IOCEBER = 0x80044f01 + UBI_IOCEBISMAP = 0x40044f05 + UBI_IOCEBMAP = 0x80084f03 + UBI_IOCEBUNMAP = 0x80044f04 + UBI_IOCMKVOL = 0x80986f00 + UBI_IOCRMVOL = 0x80046f01 + UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRSVOL = 0x800c6f02 + UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCVOLCRBLK = 0x80804f07 + UBI_IOCVOLRMBLK = 0x20004f08 + UBI_IOCVOLUP = 0x80084f00 UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 @@ -2208,6 +2297,26 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4 + XDP_COPY = 0x2 + XDP_FLAGS_DRV_MODE = 0x4 + XDP_FLAGS_HW_MODE = 0x8 + XDP_FLAGS_MASK = 0xf + XDP_FLAGS_MODES = 0xe + XDP_FLAGS_SKB_MODE = 0x2 + XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 + XDP_MMAP_OFFSETS = 0x1 + XDP_PGOFF_RX_RING = 0x0 + XDP_PGOFF_TX_RING = 0x80000000 + XDP_RX_RING = 0x2 + XDP_SHARED_UMEM = 0x1 + XDP_STATISTICS = 0x7 + XDP_TX_RING = 0x3 + 
XDP_UMEM_COMPLETION_RING = 0x6 + XDP_UMEM_FILL_RING = 0x5 + XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 + XDP_UMEM_PGOFF_FILL_RING = 0x100000000 + XDP_UMEM_REG = 0x4 + XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0x1800 ZSMALLOC_MAGIC = 0x58295829 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 1131d3ca..0767ac1b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -3,7 +3,7 @@ // +build mips64le,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -64,6 +64,7 @@ const ( AF_VSOCK = 0x28 AF_WANPIPE = 0x19 AF_X25 = 0x9 + AF_XDP = 0x2c ALG_OP_DECRYPT = 0x0 ALG_OP_ENCRYPT = 0x1 ALG_SET_AEAD_ASSOCLEN = 0x4 @@ -449,6 +450,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -497,6 +499,8 @@ const ( FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 + FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 @@ -634,7 +638,7 @@ const ( IFA_F_STABLE_PRIVACY = 0x800 IFA_F_TEMPORARY = 0x1 IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0x8 + IFA_MAX = 0x9 IFF_ALLMULTI = 0x200 IFF_ATTACH_QUEUE = 0x200 IFF_AUTOMEDIA = 0x4000 @@ -761,6 +765,7 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b + IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 IPV6_HOPOPTS = 0x36 @@ -953,6 +958,7 @@ const ( MAP_EXECUTABLE = 0x4000 MAP_FILE = 0x0 MAP_FIXED = 0x10 + MAP_FIXED_NOREPLACE = 0x100000 MAP_GROWSDOWN = 0x1000 MAP_HUGETLB = 0x80000 MAP_HUGE_MASK = 0x3f @@ -964,11 +970,27 @@ const ( MAP_PRIVATE = 0x2 MAP_RENAME = 0x800 MAP_SHARED = 0x1 + MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x40000 MAP_TYPE = 0xf MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MFD_ALLOW_SEALING = 0x2 + MFD_CLOEXEC = 0x1 + MFD_HUGETLB = 0x4 + MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16MB = 0x60000000 + MFD_HUGE_1GB = 0x78000000 + MFD_HUGE_1MB = 0x50000000 + MFD_HUGE_256MB = 0x70000000 + MFD_HUGE_2GB = 0x7c000000 + MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_64KB = 0x40000000 + MFD_HUGE_8MB = 0x5c000000 + MFD_HUGE_MASK = 0x3f + MFD_HUGE_SHIFT = 0x1a MINIX2_SUPER_MAGIC = 0x2468 MINIX2_SUPER_MAGIC2 = 0x2478 MINIX3_SUPER_MAGIC = 0x4d5a @@ -1074,6 +1096,8 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NETNSA_MAX = 0x3 + NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1233,6 +1257,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x20002401 PERF_EVENT_IOC_ENABLE = 0x20002400 PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 PERF_EVENT_IOC_PERIOD = 0x80082404 PERF_EVENT_IOC_QUERY_BPF = 0xc008240a @@ -1288,6 +1313,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1333,11 +1359,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + 
PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1418,6 +1451,9 @@ const ( RAMFS_MAGIC = 0x858458f6 RDTGROUP_SUPER_MAGIC = 0x7655821 REISERFS_SUPER_MAGIC = 0x52654973 + RENAME_EXCHANGE = 0x2 + RENAME_NOREPLACE = 0x1 + RENAME_WHITEOUT = 0x4 RLIMIT_AS = 0x6 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1460,13 +1496,40 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x1d RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4008700d + RTC_EPOCH_SET = 0x8008700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x4008700b + RTC_IRQP_SET = 0x8008700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x40207011 + RTC_PLL_SET = 0x80207012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 @@ -1569,17 +1632,22 @@ const ( RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a + RTPROT_BGP = 0xba RTPROT_BIRD = 0xc RTPROT_BOOT = 0x3 RTPROT_DHCP = 0x10 RTPROT_DNROUTED = 0xd + RTPROT_EIGRP = 0xc0 RTPROT_GATED = 0x8 + RTPROT_ISIS = 0xbb RTPROT_KERNEL = 0x2 RTPROT_MROUTED = 0x11 RTPROT_MRT = 0xa RTPROT_NTK = 0xf + RTPROT_OSPF = 0xbc RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 + RTPROT_RIP = 0xbd RTPROT_STATIC = 0x4 RTPROT_UNSPEC = 0x0 RTPROT_XORP = 0xe @@ -1744,6 +1812,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_X25 = 0x106 + SOL_XDP = 0x11b SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x1009 SO_ATTACH_BPF = 0x32 @@ -1840,6 +1909,9 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SYNC_FILE_RANGE_WAIT_AFTER = 0x4 + SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 + SYNC_FILE_RANGE_WRITE = 0x2 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -1902,6 +1974,8 @@ const ( TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 TCP_FASTOPEN_CONNECT = 0x1e + TCP_FASTOPEN_KEY = 0x21 + TCP_FASTOPEN_NO_COOKIE = 0x22 TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -2071,6 +2145,21 @@ const ( TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc + UBI_IOCATT = 0x80186f40 + UBI_IOCDET = 0x80046f41 + UBI_IOCEBCH = 0x80044f02 + UBI_IOCEBER = 0x80044f01 + UBI_IOCEBISMAP = 0x40044f05 + UBI_IOCEBMAP = 0x80084f03 + UBI_IOCEBUNMAP = 0x80044f04 + UBI_IOCMKVOL = 0x80986f00 + UBI_IOCRMVOL = 0x80046f01 + UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRSVOL = 0x800c6f02 + UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCVOLCRBLK = 0x80804f07 + UBI_IOCVOLRMBLK = 0x20004f08 + UBI_IOCVOLUP = 0x80084f00 UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 @@ -2208,6 +2297,26 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4 + XDP_COPY = 0x2 + XDP_FLAGS_DRV_MODE = 0x4 + XDP_FLAGS_HW_MODE = 0x8 + XDP_FLAGS_MASK = 0xf + XDP_FLAGS_MODES = 0xe + 
XDP_FLAGS_SKB_MODE = 0x2 + XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 + XDP_MMAP_OFFSETS = 0x1 + XDP_PGOFF_RX_RING = 0x0 + XDP_PGOFF_TX_RING = 0x80000000 + XDP_RX_RING = 0x2 + XDP_SHARED_UMEM = 0x1 + XDP_STATISTICS = 0x7 + XDP_TX_RING = 0x3 + XDP_UMEM_COMPLETION_RING = 0x6 + XDP_UMEM_FILL_RING = 0x5 + XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 + XDP_UMEM_PGOFF_FILL_RING = 0x100000000 + XDP_UMEM_REG = 0x4 + XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0x1800 ZSMALLOC_MAGIC = 0x58295829 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index d04a43b2..269b8131 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -3,7 +3,7 @@ // +build mipsle,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -64,6 +64,7 @@ const ( AF_VSOCK = 0x28 AF_WANPIPE = 0x19 AF_X25 = 0x9 + AF_XDP = 0x2c ALG_OP_DECRYPT = 0x0 ALG_OP_ENCRYPT = 0x1 ALG_SET_AEAD_ASSOCLEN = 0x4 @@ -449,6 +450,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -497,6 +499,8 @@ const ( FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 + FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 @@ -634,7 +638,7 @@ const ( IFA_F_STABLE_PRIVACY = 0x800 IFA_F_TEMPORARY = 0x1 IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0x8 + IFA_MAX = 0x9 IFF_ALLMULTI = 0x200 IFF_ATTACH_QUEUE = 0x200 IFF_AUTOMEDIA = 0x4000 @@ -761,6 +765,7 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b + IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 IPV6_HOPOPTS = 0x36 @@ -953,6 +958,7 @@ const ( MAP_EXECUTABLE = 0x4000 MAP_FILE = 0x0 MAP_FIXED = 0x10 + MAP_FIXED_NOREPLACE = 0x100000 MAP_GROWSDOWN = 0x1000 MAP_HUGETLB = 0x80000 MAP_HUGE_MASK = 0x3f @@ -964,11 +970,27 @@ const ( MAP_PRIVATE = 0x2 MAP_RENAME = 0x800 MAP_SHARED = 0x1 + MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x40000 MAP_TYPE = 0xf MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MFD_ALLOW_SEALING = 0x2 + MFD_CLOEXEC = 0x1 + MFD_HUGETLB = 0x4 + MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16MB = 0x60000000 + MFD_HUGE_1GB = 0x78000000 + MFD_HUGE_1MB = 0x50000000 + MFD_HUGE_256MB = 0x70000000 + MFD_HUGE_2GB = 0x7c000000 + MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_64KB = 0x40000000 + MFD_HUGE_8MB = 0x5c000000 + MFD_HUGE_MASK = 0x3f + MFD_HUGE_SHIFT = 0x1a MINIX2_SUPER_MAGIC = 0x2468 MINIX2_SUPER_MAGIC2 = 0x2478 MINIX3_SUPER_MAGIC = 0x4d5a @@ -1074,6 +1096,8 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NETNSA_MAX = 0x3 + NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1233,6 +1257,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x20002401 PERF_EVENT_IOC_ENABLE = 0x20002400 PERF_EVENT_IOC_ID = 0x40042407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8004240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 PERF_EVENT_IOC_PERIOD = 0x80082404 PERF_EVENT_IOC_QUERY_BPF = 0xc004240a @@ -1288,6 +1313,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + 
PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1333,11 +1359,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1418,6 +1451,9 @@ const ( RAMFS_MAGIC = 0x858458f6 RDTGROUP_SUPER_MAGIC = 0x7655821 REISERFS_SUPER_MAGIC = 0x52654973 + RENAME_EXCHANGE = 0x2 + RENAME_NOREPLACE = 0x1 + RENAME_WHITEOUT = 0x4 RLIMIT_AS = 0x6 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1460,13 +1496,40 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x1d RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4004700d + RTC_EPOCH_SET = 0x8004700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x4004700b + RTC_IRQP_SET = 0x8004700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x401c7011 + RTC_PLL_SET = 0x801c7012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 @@ -1569,17 +1632,22 @@ const ( RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a + RTPROT_BGP = 0xba RTPROT_BIRD = 0xc RTPROT_BOOT = 0x3 RTPROT_DHCP = 0x10 RTPROT_DNROUTED = 0xd + RTPROT_EIGRP = 0xc0 RTPROT_GATED = 0x8 + RTPROT_ISIS = 0xbb RTPROT_KERNEL = 0x2 RTPROT_MROUTED = 0x11 RTPROT_MRT = 0xa RTPROT_NTK = 0xf + RTPROT_OSPF = 0xbc RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 + RTPROT_RIP = 0xbd RTPROT_STATIC = 0x4 RTPROT_UNSPEC = 0x0 RTPROT_XORP = 0xe @@ -1744,6 +1812,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_X25 = 0x106 + SOL_XDP = 0x11b SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x1009 SO_ATTACH_BPF = 0x32 @@ -1840,6 +1909,9 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SYNC_FILE_RANGE_WAIT_AFTER = 0x4 + SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 + SYNC_FILE_RANGE_WRITE = 0x2 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -1902,6 +1974,8 @@ const ( TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 TCP_FASTOPEN_CONNECT = 0x1e + TCP_FASTOPEN_KEY = 0x21 + TCP_FASTOPEN_NO_COOKIE = 0x22 TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -2071,6 +2145,21 @@ const ( TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc + UBI_IOCATT = 0x80186f40 + UBI_IOCDET = 0x80046f41 + UBI_IOCEBCH = 0x80044f02 + UBI_IOCEBER = 0x80044f01 + UBI_IOCEBISMAP = 0x40044f05 + UBI_IOCEBMAP = 0x80084f03 + UBI_IOCEBUNMAP = 0x80044f04 + UBI_IOCMKVOL = 0x80986f00 + UBI_IOCRMVOL = 0x80046f01 + UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRSVOL = 0x800c6f02 + UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCVOLCRBLK = 0x80804f07 + UBI_IOCVOLRMBLK = 0x20004f08 + UBI_IOCVOLUP = 0x80084f00 UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 
USBDEVICE_SUPER_MAGIC = 0x9fa2 @@ -2208,6 +2297,26 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4 + XDP_COPY = 0x2 + XDP_FLAGS_DRV_MODE = 0x4 + XDP_FLAGS_HW_MODE = 0x8 + XDP_FLAGS_MASK = 0xf + XDP_FLAGS_MODES = 0xe + XDP_FLAGS_SKB_MODE = 0x2 + XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 + XDP_MMAP_OFFSETS = 0x1 + XDP_PGOFF_RX_RING = 0x0 + XDP_PGOFF_TX_RING = 0x80000000 + XDP_RX_RING = 0x2 + XDP_SHARED_UMEM = 0x1 + XDP_STATISTICS = 0x7 + XDP_TX_RING = 0x3 + XDP_UMEM_COMPLETION_RING = 0x6 + XDP_UMEM_FILL_RING = 0x5 + XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 + XDP_UMEM_PGOFF_FILL_RING = 0x100000000 + XDP_UMEM_REG = 0x4 + XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0x1800 ZSMALLOC_MAGIC = 0x58295829 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 710410ef..eb52e968 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -3,7 +3,7 @@ // +build ppc64,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -64,6 +64,7 @@ const ( AF_VSOCK = 0x28 AF_WANPIPE = 0x19 AF_X25 = 0x9 + AF_XDP = 0x2c ALG_OP_DECRYPT = 0x0 ALG_OP_ENCRYPT = 0x1 ALG_SET_AEAD_ASSOCLEN = 0x4 @@ -449,6 +450,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -497,6 +499,8 @@ const ( FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 + FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 @@ -634,7 +638,7 @@ const ( IFA_F_STABLE_PRIVACY = 0x800 IFA_F_TEMPORARY = 0x1 IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0x8 + IFA_MAX = 0x9 IFF_ALLMULTI = 0x200 IFF_ATTACH_QUEUE = 0x200 IFF_AUTOMEDIA = 0x4000 @@ -761,6 +765,7 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b + IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 IPV6_HOPOPTS = 0x36 @@ -953,6 +958,7 @@ const ( MAP_EXECUTABLE = 0x1000 MAP_FILE = 0x0 MAP_FIXED = 0x10 + MAP_FIXED_NOREPLACE = 0x100000 MAP_GROWSDOWN = 0x100 MAP_HUGETLB = 0x40000 MAP_HUGE_MASK = 0x3f @@ -963,11 +969,27 @@ const ( MAP_POPULATE = 0x8000 MAP_PRIVATE = 0x2 MAP_SHARED = 0x1 + MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x20000 MAP_TYPE = 0xf MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 + MFD_ALLOW_SEALING = 0x2 + MFD_CLOEXEC = 0x1 + MFD_HUGETLB = 0x4 + MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16MB = 0x60000000 + MFD_HUGE_1GB = 0x78000000 + MFD_HUGE_1MB = 0x50000000 + MFD_HUGE_256MB = 0x70000000 + MFD_HUGE_2GB = 0x7c000000 + MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_64KB = 0x40000000 + MFD_HUGE_8MB = 0x5c000000 + MFD_HUGE_MASK = 0x3f + MFD_HUGE_SHIFT = 0x1a MINIX2_SUPER_MAGIC = 0x2468 MINIX2_SUPER_MAGIC2 = 0x2478 MINIX3_SUPER_MAGIC = 0x4d5a @@ -1073,6 +1095,8 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NETNSA_MAX = 0x3 + NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1234,6 +1258,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x20002401 PERF_EVENT_IOC_ENABLE = 0x20002400 PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b 
PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 PERF_EVENT_IOC_PERIOD = 0x80082404 PERF_EVENT_IOC_QUERY_BPF = 0xc008240a @@ -1290,6 +1315,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1335,11 +1361,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1474,6 +1507,9 @@ const ( RAMFS_MAGIC = 0x858458f6 RDTGROUP_SUPER_MAGIC = 0x7655821 REISERFS_SUPER_MAGIC = 0x52654973 + RENAME_EXCHANGE = 0x2 + RENAME_NOREPLACE = 0x1 + RENAME_WHITEOUT = 0x4 RLIMIT_AS = 0x9 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1516,13 +1552,40 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x1d RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4008700d + RTC_EPOCH_SET = 0x8008700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x4008700b + RTC_IRQP_SET = 0x8008700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x40207011 + RTC_PLL_SET = 0x80207012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 @@ -1625,17 +1688,22 @@ const ( RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a + RTPROT_BGP = 0xba RTPROT_BIRD = 0xc RTPROT_BOOT = 0x3 RTPROT_DHCP = 0x10 RTPROT_DNROUTED = 0xd + RTPROT_EIGRP = 0xc0 RTPROT_GATED = 0x8 + RTPROT_ISIS = 0xbb RTPROT_KERNEL = 0x2 RTPROT_MROUTED = 0x11 RTPROT_MRT = 0xa RTPROT_NTK = 0xf + RTPROT_OSPF = 0xbc RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 + RTPROT_RIP = 0xbd RTPROT_STATIC = 0x4 RTPROT_UNSPEC = 0x0 RTPROT_XORP = 0xe @@ -1800,6 +1868,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_X25 = 0x106 + SOL_XDP = 0x11b SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x1e SO_ATTACH_BPF = 0x32 @@ -1895,6 +1964,9 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SYNC_FILE_RANGE_WAIT_AFTER = 0x4 + SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 + SYNC_FILE_RANGE_WRITE = 0x2 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -1956,6 +2028,8 @@ const ( TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 TCP_FASTOPEN_CONNECT = 0x1e + TCP_FASTOPEN_KEY = 0x21 + TCP_FASTOPEN_NO_COOKIE = 0x22 TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -2129,6 +2203,21 @@ const ( TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc + UBI_IOCATT = 0x80186f40 + UBI_IOCDET = 0x80046f41 + UBI_IOCEBCH = 0x80044f02 + UBI_IOCEBER = 0x80044f01 + UBI_IOCEBISMAP = 0x40044f05 + UBI_IOCEBMAP = 0x80084f03 + UBI_IOCEBUNMAP = 0x80044f04 + UBI_IOCMKVOL = 0x80986f00 + UBI_IOCRMVOL = 0x80046f01 + UBI_IOCRNVOL = 
0x91106f03 + UBI_IOCRSVOL = 0x800c6f02 + UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCVOLCRBLK = 0x80804f07 + UBI_IOCVOLRMBLK = 0x20004f08 + UBI_IOCVOLUP = 0x80084f00 UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 @@ -2265,6 +2354,26 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4000 + XDP_COPY = 0x2 + XDP_FLAGS_DRV_MODE = 0x4 + XDP_FLAGS_HW_MODE = 0x8 + XDP_FLAGS_MASK = 0xf + XDP_FLAGS_MODES = 0xe + XDP_FLAGS_SKB_MODE = 0x2 + XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 + XDP_MMAP_OFFSETS = 0x1 + XDP_PGOFF_RX_RING = 0x0 + XDP_PGOFF_TX_RING = 0x80000000 + XDP_RX_RING = 0x2 + XDP_SHARED_UMEM = 0x1 + XDP_STATISTICS = 0x7 + XDP_TX_RING = 0x3 + XDP_UMEM_COMPLETION_RING = 0x6 + XDP_UMEM_FILL_RING = 0x5 + XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 + XDP_UMEM_PGOFF_FILL_RING = 0x100000000 + XDP_UMEM_REG = 0x4 + XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0xc00 ZSMALLOC_MAGIC = 0x58295829 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index c1c1c01b..0563d34b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -3,7 +3,7 @@ // +build ppc64le,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -64,6 +64,7 @@ const ( AF_VSOCK = 0x28 AF_WANPIPE = 0x19 AF_X25 = 0x9 + AF_XDP = 0x2c ALG_OP_DECRYPT = 0x0 ALG_OP_ENCRYPT = 0x1 ALG_SET_AEAD_ASSOCLEN = 0x4 @@ -449,6 +450,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -497,6 +499,8 @@ const ( FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 + FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 @@ -634,7 +638,7 @@ const ( IFA_F_STABLE_PRIVACY = 0x800 IFA_F_TEMPORARY = 0x1 IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0x8 + IFA_MAX = 0x9 IFF_ALLMULTI = 0x200 IFF_ATTACH_QUEUE = 0x200 IFF_AUTOMEDIA = 0x4000 @@ -761,6 +765,7 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b + IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 IPV6_HOPOPTS = 0x36 @@ -953,6 +958,7 @@ const ( MAP_EXECUTABLE = 0x1000 MAP_FILE = 0x0 MAP_FIXED = 0x10 + MAP_FIXED_NOREPLACE = 0x100000 MAP_GROWSDOWN = 0x100 MAP_HUGETLB = 0x40000 MAP_HUGE_MASK = 0x3f @@ -963,11 +969,27 @@ const ( MAP_POPULATE = 0x8000 MAP_PRIVATE = 0x2 MAP_SHARED = 0x1 + MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x20000 MAP_TYPE = 0xf MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 + MFD_ALLOW_SEALING = 0x2 + MFD_CLOEXEC = 0x1 + MFD_HUGETLB = 0x4 + MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16MB = 0x60000000 + MFD_HUGE_1GB = 0x78000000 + MFD_HUGE_1MB = 0x50000000 + MFD_HUGE_256MB = 0x70000000 + MFD_HUGE_2GB = 0x7c000000 + MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_64KB = 0x40000000 + MFD_HUGE_8MB = 0x5c000000 + MFD_HUGE_MASK = 0x3f + MFD_HUGE_SHIFT = 0x1a MINIX2_SUPER_MAGIC = 0x2468 MINIX2_SUPER_MAGIC2 = 0x2478 MINIX3_SUPER_MAGIC = 0x4d5a @@ -1073,6 +1095,8 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NETNSA_MAX = 0x3 + NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 
NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1234,6 +1258,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x20002401 PERF_EVENT_IOC_ENABLE = 0x20002400 PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 PERF_EVENT_IOC_PERIOD = 0x80082404 PERF_EVENT_IOC_QUERY_BPF = 0xc008240a @@ -1290,6 +1315,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1335,11 +1361,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1474,6 +1507,9 @@ const ( RAMFS_MAGIC = 0x858458f6 RDTGROUP_SUPER_MAGIC = 0x7655821 REISERFS_SUPER_MAGIC = 0x52654973 + RENAME_EXCHANGE = 0x2 + RENAME_NOREPLACE = 0x1 + RENAME_WHITEOUT = 0x4 RLIMIT_AS = 0x9 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1516,13 +1552,40 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x1d RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4008700d + RTC_EPOCH_SET = 0x8008700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x4008700b + RTC_IRQP_SET = 0x8008700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x40207011 + RTC_PLL_SET = 0x80207012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 @@ -1625,17 +1688,22 @@ const ( RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a + RTPROT_BGP = 0xba RTPROT_BIRD = 0xc RTPROT_BOOT = 0x3 RTPROT_DHCP = 0x10 RTPROT_DNROUTED = 0xd + RTPROT_EIGRP = 0xc0 RTPROT_GATED = 0x8 + RTPROT_ISIS = 0xbb RTPROT_KERNEL = 0x2 RTPROT_MROUTED = 0x11 RTPROT_MRT = 0xa RTPROT_NTK = 0xf + RTPROT_OSPF = 0xbc RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 + RTPROT_RIP = 0xbd RTPROT_STATIC = 0x4 RTPROT_UNSPEC = 0x0 RTPROT_XORP = 0xe @@ -1800,6 +1868,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_X25 = 0x106 + SOL_XDP = 0x11b SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x1e SO_ATTACH_BPF = 0x32 @@ -1895,6 +1964,9 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SYNC_FILE_RANGE_WAIT_AFTER = 0x4 + SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 + SYNC_FILE_RANGE_WRITE = 0x2 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -1956,6 +2028,8 @@ const ( TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 TCP_FASTOPEN_CONNECT = 0x1e + TCP_FASTOPEN_KEY = 0x21 + TCP_FASTOPEN_NO_COOKIE = 0x22 TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -2129,6 +2203,21 @@ const ( TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc + UBI_IOCATT = 0x80186f40 + 
UBI_IOCDET = 0x80046f41 + UBI_IOCEBCH = 0x80044f02 + UBI_IOCEBER = 0x80044f01 + UBI_IOCEBISMAP = 0x40044f05 + UBI_IOCEBMAP = 0x80084f03 + UBI_IOCEBUNMAP = 0x80044f04 + UBI_IOCMKVOL = 0x80986f00 + UBI_IOCRMVOL = 0x80046f01 + UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRSVOL = 0x800c6f02 + UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCVOLCRBLK = 0x80804f07 + UBI_IOCVOLRMBLK = 0x20004f08 + UBI_IOCVOLUP = 0x80084f00 UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 @@ -2265,6 +2354,26 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4000 + XDP_COPY = 0x2 + XDP_FLAGS_DRV_MODE = 0x4 + XDP_FLAGS_HW_MODE = 0x8 + XDP_FLAGS_MASK = 0xf + XDP_FLAGS_MODES = 0xe + XDP_FLAGS_SKB_MODE = 0x2 + XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 + XDP_MMAP_OFFSETS = 0x1 + XDP_PGOFF_RX_RING = 0x0 + XDP_PGOFF_TX_RING = 0x80000000 + XDP_RX_RING = 0x2 + XDP_SHARED_UMEM = 0x1 + XDP_STATISTICS = 0x7 + XDP_TX_RING = 0x3 + XDP_UMEM_COMPLETION_RING = 0x6 + XDP_UMEM_FILL_RING = 0x5 + XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 + XDP_UMEM_PGOFF_FILL_RING = 0x100000000 + XDP_UMEM_REG = 0x4 + XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0xc00 ZSMALLOC_MAGIC = 0x58295829 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go new file mode 100644 index 00000000..e95e3f67 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -0,0 +1,2662 @@ +// mkerrors.sh -Wall -Werror -static -I/tmp/include +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build riscv64,linux + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go + +package unix + +import "syscall" + +const ( + AAFS_MAGIC = 0x5a3c69f0 + ADFS_SUPER_MAGIC = 0xadf5 + AFFS_SUPER_MAGIC = 0xadff + AFS_FS_MAGIC = 0x6b414653 + AFS_SUPER_MAGIC = 0x5346414f + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IB = 0x1b + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KCM = 0x29 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x2c + AF_MPLS = 0x1c + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_QIPCRTR = 0x2a + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SMC = 0x2b + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VSOCK = 0x28 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + AF_XDP = 0x2c + ALG_OP_DECRYPT = 0x0 + ALG_OP_ENCRYPT = 0x1 + ALG_SET_AEAD_ASSOCLEN = 0x4 + ALG_SET_AEAD_AUTHSIZE = 0x5 + ALG_SET_IV = 0x2 + ALG_SET_KEY = 0x1 + ALG_SET_OP = 0x3 + ANON_INODE_FS_MAGIC = 0x9041934 + ARPHRD_6LOWPAN = 0x339 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 
+ ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802154_MONITOR = 0x325 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IP6GRE = 0x337 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETLINK = 0x338 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_RAWIP = 0x207 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a + ARPHRD_X25 = 0x10f + AUTOFS_SUPER_MAGIC = 0x187 + B0 = 0x0 + B1000000 = 0x1008 + B110 = 0x3 + B115200 = 0x1002 + B1152000 = 0x1009 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1500000 = 0x100a + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2000000 = 0x100b + B230400 = 0x1003 + B2400 = 0xb + B2500000 = 0x100c + B300 = 0x7 + B3000000 = 0x100d + B3500000 = 0x100e + B38400 = 0xf + B4000000 = 0x100f + B460800 = 0x1004 + B4800 = 0xc + B50 = 0x1 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B600 = 0x8 + B75 = 0x2 + B921600 = 0x1007 + B9600 = 0xd + BALLOON_KVM_MAGIC = 0x13661366 + BDEVFS_MAGIC = 0x62646576 + BINFMTFS_MAGIC = 0x42494e4d + BLKBSZGET = 0x80081270 + BLKBSZSET = 0x40081271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80081272 + BLKPBSZGET = 0x127b + BLKRAGET = 0x1263 + BLKRASET = 0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_FS_MAGIC = 0xcafe4a11 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LL_OFF = -0x200000 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_NET_OFF = -0x100000 + BPF_OR = 0x40 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x2000 + BSDLY = 0x2000 + BTRFS_SUPER_MAGIC = 0x9123683e + BTRFS_TEST_MAGIC = 0x73727279 + CAN_BCM = 0x2 + CAN_EFF_FLAG = 0x80000000 + CAN_EFF_ID_BITS = 0x1d + CAN_EFF_MASK = 0x1fffffff + CAN_ERR_FLAG = 0x20000000 + CAN_ERR_MASK = 0x1fffffff + CAN_INV_FILTER = 0x20000000 + CAN_ISOTP = 0x6 + CAN_MAX_DLC = 0x8 + CAN_MAX_DLEN = 0x8 + CAN_MCNET = 0x5 + CAN_MTU = 0x10 + CAN_NPROTO = 0x7 + CAN_RAW = 0x1 + CAN_RAW_FILTER_MAX = 0x200 + CAN_RTR_FLAG = 0x40000000 + CAN_SFF_ID_BITS = 0xb + CAN_SFF_MASK = 0x7ff + CAN_TP16 = 0x3 + CAN_TP20 = 0x4 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CFLUSH = 0xf + CGROUP2_SUPER_MAGIC = 0x63677270 + CGROUP_SUPER_MAGIC = 0x27e0eb + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM 
= 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_TAI = 0xb + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWCGROUP = 0x2000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CODA_SUPER_MAGIC = 0x73757245 + CR0 = 0x0 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRAMFS_MAGIC = 0x28cd3d45 + CRDLY = 0x600 + CREAD = 0x80 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIGNAL = 0xff + CSIZE = 0x30 + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSTOPB = 0x40 + CSUSP = 0x1a + DAXFS_MAGIC = 0x64646178 + DEBUGFS_MAGIC = 0x64626720 + DEVPTS_SUPER_MAGIC = 0x1cd1 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + ECRYPTFS_SUPER_MAGIC = 0xf15f + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EFD_SEMAPHORE = 0x1 + EFIVARFS_MAGIC = 0xde5e81e4 + EFS_SUPER_MAGIC = 0x414a53 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLEXCLUSIVE = 0x10000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWAKEUP = 0x20000000 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CLOEXEC = 0x80000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + ETH_P_1588 = 0x88f7 + ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_80221 = 0x8917 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_802_3_MIN = 0x600 + ETH_P_802_EX1 = 0x88b5 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BATMAN = 0x4305 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CANFD = 0xd + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_ERSPAN = 0x88be + ETH_P_ERSPAN2 = 0x22eb + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IFE = 0xed3e + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LOCALTALK = 0x9 + 
ETH_P_LOOP = 0x60 + ETH_P_LOOPBACK = 0x9000 + ETH_P_MACSEC = 0x88e5 + ETH_P_MAP = 0xf9 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_MVRP = 0x88f5 + ETH_P_NCSI = 0x88f8 + ETH_P_NSH = 0x894f + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 + ETH_P_PRP = 0x88fb + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_TSN = 0x22f0 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 + EXT2_SUPER_MAGIC = 0xef53 + EXT3_SUPER_MAGIC = 0xef53 + EXT4_SUPER_MAGIC = 0xef53 + EXTA = 0xe + EXTB = 0xf + EXTPROC = 0x10000 + F2FS_SUPER_MAGIC = 0xf2f52010 + FALLOC_FL_COLLAPSE_RANGE = 0x8 + FALLOC_FL_INSERT_RANGE = 0x20 + FALLOC_FL_KEEP_SIZE = 0x1 + FALLOC_FL_NO_HIDE_STALE = 0x4 + FALLOC_FL_PUNCH_HOLE = 0x2 + FALLOC_FL_UNSHARE_RANGE = 0x40 + FALLOC_FL_ZERO_RANGE = 0x10 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 + FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 + FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 + FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 + FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 + FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 + FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 + FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 + FS_KEY_DESCRIPTOR_SIZE = 0x8 + FS_KEY_DESC_PREFIX = "fscrypt:" + FS_KEY_DESC_PREFIX_SIZE = 0x8 + FS_MAX_KEY_SIZE = 0x40 + FS_POLICY_FLAGS_PAD_16 = 0x2 + FS_POLICY_FLAGS_PAD_32 = 0x3 + FS_POLICY_FLAGS_PAD_4 = 0x0 + FS_POLICY_FLAGS_PAD_8 = 0x1 + FS_POLICY_FLAGS_PAD_MASK = 0x3 + FS_POLICY_FLAGS_VALID = 0x3 + FUTEXFS_SUPER_MAGIC = 0xbad1dea + F_ADD_SEALS = 0x409 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETLK = 0x5 + F_GETLK64 = 0x5 + F_GETOWN = 0x9 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_GET_FILE_RW_HINT = 0x40d + F_GET_RW_HINT = 0x40b + F_GET_SEALS = 0x40a + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OFD_GETLK = 0x24 + F_OFD_SETLK = 0x25 + F_OFD_SETLKW = 0x26 + F_OK = 0x0 + F_RDLCK = 0x0 + F_SEAL_GROW = 0x4 + F_SEAL_SEAL = 0x1 + F_SEAL_SHRINK = 0x2 + F_SEAL_WRITE = 0x8 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x8 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SET_FILE_RW_HINT = 0x40e + F_SET_RW_HINT = 0x40c + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + GENL_ADMIN_PERM = 0x1 + GENL_CMD_CAP_DO = 0x2 + GENL_CMD_CAP_DUMP = 0x4 + GENL_CMD_CAP_HASPOL = 0x8 + GENL_HDRLEN = 0x4 + GENL_ID_CTRL = 0x10 + GENL_ID_PMCRAID = 0x12 + GENL_ID_VFS_DQUOT = 0x11 + GENL_MAX_ID = 0x3ff + GENL_MIN_ID = 0x10 + GENL_NAMSIZ = 0x10 + GENL_START_ALLOC = 0x13 + GENL_UNS_ADMIN_PERM = 0x10 + GRND_NONBLOCK = 0x1 + GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + 
HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a + HOSTFS_SUPER_MAGIC = 0xc0ffee + HPFS_SUPER_MAGIC = 0xf995e849 + HUGETLBFS_MAGIC = 0x958458f6 + HUPCL = 0x400 + IBSHIFT = 0x10 + ICANON = 0x2 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IEXTEN = 0x8000 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_MANAGETEMPADDR = 0x100 + IFA_F_MCAUTOJOIN = 0x400 + IFA_F_NODAD = 0x2 + IFA_F_NOPREFIXROUTE = 0x200 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_STABLE_PRIVACY = 0x800 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0x9 + IFF_ALLMULTI = 0x200 + IFF_ATTACH_QUEUE = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DETACH_QUEUE = 0x400 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_ECHO = 0x40000 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MASTER = 0x400 + IFF_MULTICAST = 0x1000 + IFF_MULTI_QUEUE = 0x100 + IFF_NAPI = 0x10 + IFF_NAPI_FRAGS = 0x20 + IFF_NOARP = 0x80 + IFF_NOFILTER = 0x1000 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_PERSIST = 0x800 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_TAP = 0x2 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOEXEC = 0x80000 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_NONBLOCK = 0x800 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPPROTO_AH = 0x33 + IPPROTO_BEETPH = 0x5e + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + 
IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MH = 0x87 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 + IPV6_CHECKSUM = 0x7 + IPV6_DONTFRAG = 0x3e + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_FREEBIND = 0x4e + IPV6_HDRINCL = 0x24 + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a + IPV6_PATHMTU = 0x3d + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_INTERFACE = 0x4 + IPV6_PMTUDISC_OMIT = 0x5 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a + IPV6_RECVPATHMTU = 0x3c + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b + IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BIND_ADDRESS_NO_PORT = 0x18 + IP_BLOCK_SOURCE = 0x26 + IP_CHECKSUM = 0x17 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_NODEFRAG = 0x16 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_INTERFACE = 0x4 + IP_PMTUDISC_OMIT = 0x5 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_UNICAST_IF = 0x32 + IP_XFRM_POLICY = 0x11 + ISIG = 0x1 + ISOFS_SUPER_MAGIC = 0x9660 + ISTRIP = 0x20 + IUCLC = 0x200 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x1000 + IXON = 0x400 + JFFS2_SUPER_MAGIC = 0x72b6 + 
KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CHOWN = 0x4 + KEYCTL_CLEAR = 0x7 + KEYCTL_DESCRIBE = 0x6 + KEYCTL_DH_COMPUTE = 0x17 + KEYCTL_GET_KEYRING_ID = 0x0 + KEYCTL_GET_PERSISTENT = 0x16 + KEYCTL_GET_SECURITY = 0x11 + KEYCTL_INSTANTIATE = 0xc + KEYCTL_INSTANTIATE_IOV = 0x14 + KEYCTL_INVALIDATE = 0x15 + KEYCTL_JOIN_SESSION_KEYRING = 0x1 + KEYCTL_LINK = 0x8 + KEYCTL_NEGATE = 0xd + KEYCTL_READ = 0xb + KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d + KEYCTL_REVOKE = 0x3 + KEYCTL_SEARCH = 0xa + KEYCTL_SESSION_TO_PARENT = 0x12 + KEYCTL_SETPERM = 0x5 + KEYCTL_SET_REQKEY_KEYRING = 0xe + KEYCTL_SET_TIMEOUT = 0xf + KEYCTL_UNLINK = 0x9 + KEYCTL_UPDATE = 0x2 + KEY_REQKEY_DEFL_DEFAULT = 0x0 + KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 + KEY_REQKEY_DEFL_NO_CHANGE = -0x1 + KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 + KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 + KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 + KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 + KEY_REQKEY_DEFL_USER_KEYRING = 0x4 + KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 + KEY_SPEC_GROUP_KEYRING = -0x6 + KEY_SPEC_PROCESS_KEYRING = -0x2 + KEY_SPEC_REQKEY_AUTH_KEY = -0x7 + KEY_SPEC_REQUESTOR_KEYRING = -0x8 + KEY_SPEC_SESSION_KEYRING = -0x3 + KEY_SPEC_THREAD_KEYRING = -0x1 + KEY_SPEC_USER_KEYRING = -0x4 + KEY_SPEC_USER_SESSION_KEYRING = -0x5 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DODUMP = 0x11 + MADV_DOFORK = 0xb + MADV_DONTDUMP = 0x10 + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_FREE = 0x8 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_KEEPONFORK = 0x13 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MADV_WIPEONFORK = 0x12 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_FIXED_NOREPLACE = 0x100000 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_HUGE_MASK = 0x3f + MAP_HUGE_SHIFT = 0x1a + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_PRIVATE = 0x2 + MAP_SHARED = 0x1 + MAP_SHARED_VALIDATE = 0x3 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MAP_TYPE = 0xf + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + MFD_ALLOW_SEALING = 0x2 + MFD_CLOEXEC = 0x1 + MFD_HUGETLB = 0x4 + MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16MB = 0x60000000 + MFD_HUGE_1GB = 0x78000000 + MFD_HUGE_1MB = 0x50000000 + MFD_HUGE_256MB = 0x70000000 + MFD_HUGE_2GB = 0x7c000000 + MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_64KB = 0x40000000 + MFD_HUGE_8MB = 0x5c000000 + MFD_HUGE_MASK = 0x3f + MFD_HUGE_SHIFT = 0x1a + MINIX2_SUPER_MAGIC = 0x2468 + MINIX2_SUPER_MAGIC2 = 0x2478 + MINIX3_SUPER_MAGIC = 0x4d5a + MINIX_SUPER_MAGIC = 0x137f + MINIX_SUPER_MAGIC2 = 0x138f + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MSDOS_SUPER_MAGIC = 0x4d44 + MSG_BATCH = 0x40000 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + 
MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MSG_ZEROCOPY = 0x4000000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_BORN = 0x20000000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_LAZYTIME = 0x2000000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOREMOTELOCK = 0x8000000 + MS_NOSEC = 0x10000000 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x2800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + MS_VERBOSE = 0x8000 + MTD_INODE_FS_MAGIC = 0x11307854 + NAME_MAX = 0xff + NCP_SUPER_MAGIC = 0x564c + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CAP_ACK = 0xa + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_LISTEN_ALL_NSID = 0x8 + NETLINK_LIST_MEMBERSHIPS = 0x9 + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_RX_RING = 0x6 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 + NETLINK_SOCK_DIAG = 0x4 + NETLINK_TX_RING = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NETNSA_MAX = 0x3 + NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFNETLINK_V0 = 0x0 + NFNLGRP_ACCT_QUOTA = 0x8 + NFNLGRP_CONNTRACK_DESTROY = 0x3 + NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 + NFNLGRP_CONNTRACK_EXP_NEW = 0x4 + NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 + NFNLGRP_CONNTRACK_NEW = 0x1 + NFNLGRP_CONNTRACK_UPDATE = 0x2 + NFNLGRP_MAX = 0x9 + NFNLGRP_NFTABLES = 0x7 + NFNLGRP_NFTRACE = 0x9 + NFNLGRP_NONE = 0x0 + NFNL_BATCH_MAX = 0x1 + NFNL_MSG_BATCH_BEGIN = 0x10 + NFNL_MSG_BATCH_END = 0x11 + NFNL_NFA_NEST = 0x8000 + NFNL_SUBSYS_ACCT = 0x7 + NFNL_SUBSYS_COUNT = 0xc + NFNL_SUBSYS_CTHELPER = 0x9 + NFNL_SUBSYS_CTNETLINK = 0x1 + NFNL_SUBSYS_CTNETLINK_EXP = 0x2 + NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 + NFNL_SUBSYS_IPSET = 0x6 + NFNL_SUBSYS_NFTABLES = 0xa + NFNL_SUBSYS_NFT_COMPAT = 0xb + NFNL_SUBSYS_NONE = 0x0 + NFNL_SUBSYS_OSF = 0x5 + NFNL_SUBSYS_QUEUE = 0x3 + NFNL_SUBSYS_ULOG = 0x4 + NFS_SUPER_MAGIC = 0x6969 + NILFS_SUPER_MAGIC = 0x3434 + NL0 = 0x0 + NL1 = 0x100 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLDLY = 0x100 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_FILTERED = 0x20 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_NONREC = 0x100 + NLM_F_REPLACE 
= 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NOFLSH = 0x80 + NSFS_MAGIC = 0x6e736673 + OCFS2_SUPER_MAGIC = 0x7461636f + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x2 + ONLCR = 0x4 + ONLRET = 0x20 + ONOCR = 0x10 + OPENPROM_SUPER_MAGIC = 0x9fa1 + OPOST = 0x1 + OVERLAYFS_SUPER_MAGIC = 0x794c7630 + O_ACCMODE = 0x3 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x4000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CBPF = 0x6 + PACKET_FANOUT_CPU = 0x2 + PACKET_FANOUT_DATA = 0x16 + PACKET_FANOUT_EBPF = 0x7 + PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + PACKET_FANOUT_QM = 0x5 + PACKET_FANOUT_RND = 0x4 + PACKET_FANOUT_ROLLOVER = 0x3 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + PACKET_KERNEL = 0x7 + PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_QDISC_BYPASS = 0x14 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_ROLLOVER_STATS = 0x15 + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_HAS_OFF = 0x13 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_USER = 0x6 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARENB = 0x100 + PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 0x8 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PIPEFS_MAGIC = 0x50495045 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROC_SUPER_MAGIC = 0x9fa0 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_CAP_AMBIENT = 0x2f + PR_CAP_AMBIENT_CLEAR_ALL = 0x4 + PR_CAP_AMBIENT_IS_SET = 0x1 + PR_CAP_AMBIENT_LOWER = 0x3 + PR_CAP_AMBIENT_RAISE = 0x2 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_FP_MODE_FR = 0x1 + 
PR_FP_MODE_FRE = 0x2 + PR_GET_CHILD_SUBREAPER = 0x25 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_FP_MODE = 0x2e + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 + PR_GET_THP_DISABLE = 0x2a + PR_GET_TID_ADDRESS = 0x28 + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_MPX_DISABLE_MANAGEMENT = 0x2c + PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_SET_CHILD_SUBREAPER = 0x24 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_FP_MODE = 0x2d + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_ARG_END = 0x9 + PR_SET_MM_ARG_START = 0x8 + PR_SET_MM_AUXV = 0xc + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_ENV_END = 0xb + PR_SET_MM_ENV_START = 0xa + PR_SET_MM_EXE_FILE = 0xd + PR_SET_MM_MAP = 0xe + PR_SET_MM_MAP_SIZE = 0xf + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 + PR_SET_THP_DISABLE = 0x29 + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 + PR_SVE_GET_VL = 0x33 + PR_SVE_SET_VL = 0x32 + PR_SVE_SET_VL_ONEXEC = 0x40000 + PR_SVE_VL_INHERIT = 0x20000 + PR_SVE_VL_LEN_MASK = 0xffff + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PSTOREFS_MAGIC = 0x6165676c + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETREGS = 0xc + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GETSIGMASK = 0x420a + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_O_EXITKILL = 0x100000 + PTRACE_O_MASK = 0x3000ff + PTRACE_O_SUSPEND_SECCOMP = 0x200000 + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKSIGINFO = 0x4209 + PTRACE_PEEKSIGINFO_SHARED = 0x1 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKETEXT = 0x4 + PTRACE_POKEUSR = 0x6 + PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SECCOMP_GET_METADATA = 0x420d + PTRACE_SEIZE = 0x4206 + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SETSIGMASK = 0x420b + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 
+ PTRACE_TRACEME = 0x0 + QNX4_SUPER_MAGIC = 0x2f + QNX6_SUPER_MAGIC = 0x68191122 + RAMFS_MAGIC = 0x858458f6 + RDTGROUP_SUPER_MAGIC = 0x7655821 + REISERFS_SUPER_MAGIC = 0x52654973 + RENAME_EXCHANGE = 0x2 + RENAME_NOREPLACE = 0x1 + RENAME_WHITEOUT = 0x4 + RLIMIT_AS = 0x9 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_LOCKS = 0xa + RLIMIT_MEMLOCK = 0x8 + RLIMIT_MSGQUEUE = 0xc + RLIMIT_NICE = 0xd + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RLIMIT_RTPRIO = 0xe + RLIMIT_RTTIME = 0xf + RLIMIT_SIGPENDING = 0xb + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0xffffffffffffffff + RTAX_ADVMSS = 0x8 + RTAX_CC_ALGO = 0x10 + RTAX_CWND = 0x7 + RTAX_FASTOPEN_NO_COOKIE = 0x11 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0x11 + RTAX_MTU = 0x2 + RTAX_QUICKACK = 0xf + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x1d + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8008700d + RTC_EPOCH_SET = 0x4008700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x8008700b + RTC_IRQP_SET = 0x4008700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x80207011 + RTC_PLL_SET = 0x40207012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELLINK = 0x11 + RTM_DELMDB = 0x55 + RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 + RTM_DELNSID = 0x59 + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 + RTM_F_LOOKUP_TABLE = 0x1000 + RTM_F_NOTIFY = 0x100 + RTM_F_PREFIX = 0x800 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETMDB = 0x56 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETNETCONF = 0x52 + RTM_GETNSID = 0x5a + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETSTATS = 0x5e + RTM_GETTCLASS = 0x2a + 
RTM_GETTFILTER = 0x2e + RTM_MAX = 0x63 + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 + RTM_NEWLINK = 0x10 + RTM_NEWMDB = 0x54 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWNETCONF = 0x50 + RTM_NEWNSID = 0x58 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWSTATS = 0x5c + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_COMPARE_MASK = 0x19 + RTNH_F_DEAD = 0x1 + RTNH_F_LINKDOWN = 0x10 + RTNH_F_OFFLOAD = 0x8 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 + RTN_MAX = 0xb + RTPROT_BABEL = 0x2a + RTPROT_BGP = 0xba + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_EIGRP = 0xc0 + RTPROT_GATED = 0x8 + RTPROT_ISIS = 0xbb + RTPROT_KERNEL = 0x2 + RTPROT_MROUTED = 0x11 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_OSPF = 0xbc + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_RIP = 0xbd + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 + SECURITYFS_MAGIC = 0x73636673 + SELINUX_MAGIC = 0xf97cff8c + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCATMARK = 0x8905 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 + SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 + SIOCGPGRP = 0x8904 + SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCOUTQNSD = 0x894b + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + 
SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 + SIOCSPGRP = 0x8902 + SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a + SMACK_MAGIC = 0x43415d53 + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 + SMB_SUPER_MAGIC = 0x517b + SOCKFS_MAGIC = 0x534f434b + SOCK_CLOEXEC = 0x80000 + SOCK_DCCP = 0x6 + SOCK_DGRAM = 0x2 + SOCK_IOC_TYPE = 0x89 + SOCK_NONBLOCK = 0x800 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_AAL = 0x109 + SOL_ALG = 0x117 + SOL_ATM = 0x108 + SOL_CAIF = 0x116 + SOL_CAN_BASE = 0x64 + SOL_DCCP = 0x10d + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_IUCV = 0x115 + SOL_KCM = 0x119 + SOL_LLC = 0x10c + SOL_NETBEUI = 0x10b + SOL_NETLINK = 0x10e + SOL_NFC = 0x118 + SOL_PACKET = 0x107 + SOL_PNPIPE = 0x113 + SOL_PPPOL2TP = 0x111 + SOL_RAW = 0xff + SOL_RDS = 0x114 + SOL_RXRPC = 0x110 + SOL_SOCKET = 0x1 + SOL_TCP = 0x6 + SOL_TIPC = 0x10f + SOL_TLS = 0x11a + SOL_X25 = 0x106 + SOL_XDP = 0x11b + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_FILTER = 0x1a + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DEBUG = 0x1 + SO_DETACH_BPF = 0x1b + SO_DETACH_FILTER = 0x1b + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_GET_FILTER = 0x1a + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_NO_CHECK = 0xb + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b + SO_PEERNAME = 0x1c + SO_PEERSEC = 0x1f + SO_PRIORITY = 0xc + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVTIMEO = 0x14 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_TIMESTAMP = 0x1d + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TYPE = 0x3 + SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 + SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 + SO_VM_SOCKETS_BUFFER_SIZE = 0x0 + SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 + SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 + SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 + SO_VM_SOCKETS_TRUSTED = 0x5 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + SPLICE_F_GIFT = 0x8 + SPLICE_F_MORE = 0x4 + SPLICE_F_MOVE = 0x1 + SPLICE_F_NONBLOCK = 0x2 + SQUASHFS_MAGIC = 0x73717368 + STACK_END_MAGIC = 0x57ac6e9d + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 
+ STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 + SYNC_FILE_RANGE_WAIT_AFTER = 0x4 + SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 + SYNC_FILE_RANGE_WRITE = 0x2 + SYSFS_MAGIC = 0x62656572 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TASKSTATS_CMD_ATTR_MAX = 0x4 + TASKSTATS_CMD_MAX = 0x2 + TASKSTATS_GENL_NAME = "TASKSTATS" + TASKSTATS_GENL_VERSION = 0x1 + TASKSTATS_TYPE_MAX = 0x6 + TASKSTATS_VERSION = 0x8 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_CC_INFO = 0x1a + TCP_CONGESTION = 0xd + TCP_COOKIE_IN_ALWAYS = 0x1 + TCP_COOKIE_MAX = 0x10 + TCP_COOKIE_MIN = 0x8 + TCP_COOKIE_OUT_NEVER = 0x2 + TCP_COOKIE_PAIR_SIZE = 0x20 + TCP_COOKIE_TRANSACTIONS = 0xf + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e + TCP_FASTOPEN_KEY = 0x21 + TCP_FASTOPEN_NO_COOKIE = 0x22 + TCP_INFO = 0xb + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_PREFIX = 0x1 + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_MSS_DEFAULT = 0x218 + TCP_MSS_DESIRED = 0x4c4 + TCP_NODELAY = 0x1 + TCP_NOTSENT_LOWAT = 0x19 + TCP_QUEUE_SEQ = 0x15 + TCP_QUICKACK = 0xc + TCP_REPAIR = 0x13 + TCP_REPAIR_OPTIONS = 0x16 + TCP_REPAIR_QUEUE = 0x14 + TCP_REPAIR_WINDOW = 0x1d + TCP_SAVED_SYN = 0x1c + TCP_SAVE_SYN = 0x1b + TCP_SYNCNT = 0x7 + TCP_S_DATA_IN = 0x4 + TCP_S_DATA_OUT = 0x8 + TCP_THIN_DUPACK = 0x11 + TCP_THIN_LINEAR_TIMEOUTS = 0x10 + TCP_TIMESTAMP = 0x18 + TCP_ULP = 0x1f + TCP_USER_TIMEOUT = 0x12 + TCP_WINDOW_CLAMP = 0xa + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 
0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TMPFS_MAGIC = 0x1021994 + TOSTOP = 0x100 + TPACKET_ALIGNMENT = 0x10 + TPACKET_HDRLEN = 0x34 + TP_STATUS_AVAILABLE = 0x0 + TP_STATUS_BLK_TMO = 0x20 + TP_STATUS_COPY = 0x2 + TP_STATUS_CSUMNOTREADY = 0x8 + TP_STATUS_CSUM_VALID = 0x80 + TP_STATUS_KERNEL = 0x0 + TP_STATUS_LOSING = 0x4 + TP_STATUS_SENDING = 0x2 + TP_STATUS_SEND_REQUEST = 0x1 + TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_SOFTWARE = 0x20000000 + TP_STATUS_TS_SYS_HARDWARE = 0x40000000 + TP_STATUS_USER = 0x1 + TP_STATUS_VLAN_TPID_VALID = 0x40 + TP_STATUS_VLAN_VALID = 0x10 + TP_STATUS_WRONG_FORMAT = 0x4 + TRACEFS_MAGIC = 0x74726163 + TS_COMM_LEN = 0x20 + TUNATTACHFILTER = 0x401054d5 + TUNDETACHFILTER = 0x401054d6 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x801054db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETBE = 0x800454df + TUNGETVNETHDRSZ = 0x800454d7 + TUNGETVNETLE = 0x800454dd + TUNSETDEBUG = 0x400454c9 + TUNSETFILTEREBPF = 0x800454e1 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 + UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 + UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 + UDF_SUPER_MAGIC = 0x15013346 + UMOUNT_NOFOLLOW = 0x8 + USBDEVICE_SUPER_MAGIC = 0x9fa2 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe + V9FS_MAGIC = 0x1021997 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMADDR_CID_ANY = 0xffffffff + VMADDR_CID_HOST = 0x2 + VMADDR_CID_HYPERVISOR = 0x0 + VMADDR_CID_RESERVED = 0x1 + VMADDR_PORT_ANY = 0xffffffff + VMIN = 0x6 + VM_SOCKETS_INVALID_VERSION = 0xffffffff + VQUIT = 0x1 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT0 = 0x0 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 + WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + 
WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WORDSIZE = 0x40 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + XCASE = 0x4 + XDP_COPY = 0x2 + XDP_FLAGS_DRV_MODE = 0x4 + XDP_FLAGS_HW_MODE = 0x8 + XDP_FLAGS_MASK = 0xf + XDP_FLAGS_MODES = 0xe + XDP_FLAGS_SKB_MODE = 0x2 + XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 + XDP_MMAP_OFFSETS = 0x1 + XDP_PGOFF_RX_RING = 0x0 + XDP_PGOFF_TX_RING = 0x80000000 + XDP_RX_RING = 0x2 + XDP_SHARED_UMEM = 0x1 + XDP_STATISTICS = 0x7 + XDP_TX_RING = 0x3 + XDP_UMEM_COMPLETION_RING = 0x6 + XDP_UMEM_FILL_RING = 0x5 + XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 + XDP_UMEM_PGOFF_FILL_RING = 0x100000000 + XDP_UMEM_REG = 0x4 + XDP_ZEROCOPY = 0x4 + XENFS_SUPER_MAGIC = 0xabba1974 + XTABS = 0x1800 + ZSMALLOC_MAGIC = 0x58295829 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x7d) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = 
syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x23) + EDESTADDRREQ = syscall.Errno(0x59) + EDOM = syscall.Errno(0x21) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x6a) + EISDIR = syscall.Errno(0x15) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x6b) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x4c) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x60) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = syscall.Errno(0x5b) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + ETXTBSY = syscall.Errno(0x1a) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x36) 
+) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x1d) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", 
"invalid slot"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, 
"EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index c7583e15..bad17418 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -3,7 +3,7 @@ // +build s390x,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char _const.go package unix @@ -64,6 +64,7 @@ const ( AF_VSOCK = 0x28 AF_WANPIPE = 0x19 AF_X25 = 0x9 + AF_XDP = 0x2c ALG_OP_DECRYPT = 0x0 ALG_OP_ENCRYPT = 0x1 ALG_SET_AEAD_ASSOCLEN = 0x4 @@ -449,6 +450,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -497,6 +499,8 @@ const ( FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 + FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 @@ -634,7 +638,7 @@ const ( IFA_F_STABLE_PRIVACY = 0x800 IFA_F_TEMPORARY = 0x1 IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0x8 + IFA_MAX = 0x9 IFF_ALLMULTI = 0x200 IFF_ATTACH_QUEUE = 0x200 IFF_AUTOMEDIA = 0x4000 @@ -761,6 +765,7 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b + IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 IPV6_HOPOPTS = 0x36 @@ -953,6 +958,7 @@ const ( MAP_EXECUTABLE = 0x1000 MAP_FILE = 0x0 MAP_FIXED = 0x10 + MAP_FIXED_NOREPLACE = 0x100000 MAP_GROWSDOWN = 0x100 MAP_HUGETLB = 0x40000 MAP_HUGE_MASK = 0x3f @@ -963,11 +969,28 @@ const ( MAP_POPULATE = 0x8000 MAP_PRIVATE = 0x2 MAP_SHARED = 0x1 + MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 MAP_TYPE = 0xf MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MFD_ALLOW_SEALING = 0x2 + MFD_CLOEXEC = 0x1 + MFD_HUGETLB = 0x4 + MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16MB = 0x60000000 + MFD_HUGE_1GB = 0x78000000 + MFD_HUGE_1MB = 0x50000000 + MFD_HUGE_256MB = 0x70000000 + MFD_HUGE_2GB = 0x7c000000 + MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_64KB = 0x40000000 + MFD_HUGE_8MB = 0x5c000000 + MFD_HUGE_MASK = 0x3f + MFD_HUGE_SHIFT = 0x1a MINIX2_SUPER_MAGIC = 0x2468 MINIX2_SUPER_MAGIC2 = 0x2478 MINIX3_SUPER_MAGIC = 0x4d5a @@ -1073,6 +1096,8 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NETNSA_MAX = 0x3 + NETNSA_NSID_NOT_ASSIGNED = -0x1 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1232,6 +1257,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x2401 PERF_EVENT_IOC_ENABLE = 0x2400 PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 PERF_EVENT_IOC_PERIOD = 0x40082404 PERF_EVENT_IOC_QUERY_BPF = 0xc008240a @@ -1287,6 +1313,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1332,11 +1359,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1478,6 +1512,9 @@ const ( RAMFS_MAGIC = 0x858458f6 RDTGROUP_SUPER_MAGIC = 0x7655821 REISERFS_SUPER_MAGIC = 0x52654973 + RENAME_EXCHANGE = 0x2 + RENAME_NOREPLACE = 0x1 + RENAME_WHITEOUT = 0x4 RLIMIT_AS = 0x9 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1520,13 +1557,40 @@ const ( RTAX_UNSPEC = 
0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x1d RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8008700d + RTC_EPOCH_SET = 0x4008700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x8008700b + RTC_IRQP_SET = 0x4008700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x80207011 + RTC_PLL_SET = 0x40207012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 @@ -1629,17 +1693,22 @@ const ( RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a + RTPROT_BGP = 0xba RTPROT_BIRD = 0xc RTPROT_BOOT = 0x3 RTPROT_DHCP = 0x10 RTPROT_DNROUTED = 0xd + RTPROT_EIGRP = 0xc0 RTPROT_GATED = 0x8 + RTPROT_ISIS = 0xbb RTPROT_KERNEL = 0x2 RTPROT_MROUTED = 0x11 RTPROT_MRT = 0xa RTPROT_NTK = 0xf + RTPROT_OSPF = 0xbc RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 + RTPROT_RIP = 0xbd RTPROT_STATIC = 0x4 RTPROT_UNSPEC = 0x0 RTPROT_XORP = 0xe @@ -1804,6 +1873,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_X25 = 0x106 + SOL_XDP = 0x11b SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x1e SO_ATTACH_BPF = 0x32 @@ -1899,6 +1969,9 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SYNC_FILE_RANGE_WAIT_AFTER = 0x4 + SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 + SYNC_FILE_RANGE_WRITE = 0x2 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -1962,6 +2035,8 @@ const ( TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 TCP_FASTOPEN_CONNECT = 0x1e + TCP_FASTOPEN_KEY = 0x21 + TCP_FASTOPEN_NO_COOKIE = 0x22 TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -2129,6 +2204,21 @@ const ( TUNSETVNETBE = 0x400454de TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc + UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 + UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 + UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 @@ -2265,6 +2355,26 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4 + XDP_COPY = 0x2 + XDP_FLAGS_DRV_MODE = 0x4 + XDP_FLAGS_HW_MODE = 0x8 + XDP_FLAGS_MASK = 0xf + XDP_FLAGS_MODES = 0xe + XDP_FLAGS_SKB_MODE = 0x2 + XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 + XDP_MMAP_OFFSETS = 0x1 + XDP_PGOFF_RX_RING = 0x0 + XDP_PGOFF_TX_RING = 0x80000000 + XDP_RX_RING = 0x2 + XDP_SHARED_UMEM = 0x1 + XDP_STATISTICS = 0x7 + XDP_TX_RING = 0x3 + XDP_UMEM_COMPLETION_RING = 0x6 + XDP_UMEM_FILL_RING = 0x5 + XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 + XDP_UMEM_PGOFF_FILL_RING = 0x100000000 + XDP_UMEM_REG = 0x4 + XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0x1800 ZSMALLOC_MAGIC = 0x58295829 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 95de199f..7fdc85b1 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -1,5 +1,5 @@ // mkerrors.sh -m64 -// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. // +build sparc64,linux diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index cd93ce0d..78cc04ea 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -550,6 +550,10 @@ const ( EV_ONESHOT = 0x10 EV_SYSFLAGS = 0xf000 EXTA = 0x4b00 + EXTATTR_CMD_START = 0x1 + EXTATTR_CMD_STOP = 0x2 + EXTATTR_NAMESPACE_SYSTEM = 0x2 + EXTATTR_NAMESPACE_USER = 0x1 EXTB = 0x9600 EXTPROC = 0x800 FD_CLOEXEC = 0x1 @@ -1016,6 +1020,43 @@ const ( MAP_WIRED = 0x800 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_BASIC_FLAGS = 0xe782807f + MNT_DEFEXPORTED = 0x200 + MNT_DISCARD = 0x800000 + MNT_EXKERB = 0x800 + MNT_EXNORESPORT = 0x8000000 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXPUBLIC = 0x10000000 + MNT_EXRDONLY = 0x80 + MNT_EXTATTR = 0x1000000 + MNT_FORCE = 0x80000 + MNT_GETARGS = 0x400000 + MNT_IGNORE = 0x100000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_LOG = 0x2000000 + MNT_NOATIME = 0x4000000 + MNT_NOCOREDUMP = 0x8000 + MNT_NODEV = 0x10 + MNT_NODEVMTIME = 0x40000000 + MNT_NOEXEC = 0x4 + MNT_NOSUID = 0x8 + MNT_NOWAIT = 0x2 + MNT_OP_FLAGS = 0x4d0000 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELATIME = 0x20000 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SOFTDEP = 0x80000000 + MNT_SYMPERM = 0x20000000 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0xff90ffff + MNT_WAIT = 0x1 MSG_BCAST = 0x100 MSG_CMSG_CLOEXEC = 0x800 MSG_CONTROLMBUF = 0x2000000 @@ -1109,7 +1150,10 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index 071701c4..92185e69 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -540,6 +540,10 @@ const ( EV_ONESHOT = 0x10 EV_SYSFLAGS = 0xf000 EXTA = 0x4b00 + EXTATTR_CMD_START = 0x1 + EXTATTR_CMD_STOP = 0x2 + EXTATTR_NAMESPACE_SYSTEM = 0x2 + EXTATTR_NAMESPACE_USER = 0x1 EXTB = 0x9600 EXTPROC = 0x800 FD_CLOEXEC = 0x1 @@ -1006,6 +1010,43 @@ const ( MAP_WIRED = 0x800 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_BASIC_FLAGS = 0xe782807f + MNT_DEFEXPORTED = 0x200 + MNT_DISCARD = 0x800000 + MNT_EXKERB = 0x800 + MNT_EXNORESPORT = 0x8000000 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXPUBLIC = 0x10000000 + MNT_EXRDONLY = 0x80 + MNT_EXTATTR = 0x1000000 + MNT_FORCE = 0x80000 + MNT_GETARGS = 0x400000 + MNT_IGNORE = 0x100000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_LOG = 0x2000000 + MNT_NOATIME = 0x4000000 + MNT_NOCOREDUMP = 0x8000 + MNT_NODEV = 0x10 + MNT_NODEVMTIME = 0x40000000 + MNT_NOEXEC = 0x4 + MNT_NOSUID = 0x8 + MNT_NOWAIT = 0x2 + MNT_OP_FLAGS = 0x4d0000 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELATIME = 0x20000 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SOFTDEP = 0x80000000 + MNT_SYMPERM = 0x20000000 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0xff90ffff + MNT_WAIT = 0x1 MSG_BCAST = 0x100 
MSG_CMSG_CLOEXEC = 0x800 MSG_CONTROLMBUF = 0x2000000 @@ -1099,7 +1140,10 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index 5fe56ae8..373ad454 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -532,6 +532,10 @@ const ( EV_ONESHOT = 0x10 EV_SYSFLAGS = 0xf000 EXTA = 0x4b00 + EXTATTR_CMD_START = 0x1 + EXTATTR_CMD_STOP = 0x2 + EXTATTR_NAMESPACE_SYSTEM = 0x2 + EXTATTR_NAMESPACE_USER = 0x1 EXTB = 0x9600 EXTPROC = 0x800 FD_CLOEXEC = 0x1 @@ -996,6 +1000,43 @@ const ( MAP_STACK = 0x2000 MAP_TRYFIXED = 0x400 MAP_WIRED = 0x800 + MNT_ASYNC = 0x40 + MNT_BASIC_FLAGS = 0xe782807f + MNT_DEFEXPORTED = 0x200 + MNT_DISCARD = 0x800000 + MNT_EXKERB = 0x800 + MNT_EXNORESPORT = 0x8000000 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXPUBLIC = 0x10000000 + MNT_EXRDONLY = 0x80 + MNT_EXTATTR = 0x1000000 + MNT_FORCE = 0x80000 + MNT_GETARGS = 0x400000 + MNT_IGNORE = 0x100000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_LOG = 0x2000000 + MNT_NOATIME = 0x4000000 + MNT_NOCOREDUMP = 0x8000 + MNT_NODEV = 0x10 + MNT_NODEVMTIME = 0x40000000 + MNT_NOEXEC = 0x4 + MNT_NOSUID = 0x8 + MNT_NOWAIT = 0x2 + MNT_OP_FLAGS = 0x4d0000 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELATIME = 0x20000 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SOFTDEP = 0x80000000 + MNT_SYMPERM = 0x20000000 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0xff90ffff + MNT_WAIT = 0x1 MSG_BCAST = 0x100 MSG_CMSG_CLOEXEC = 0x800 MSG_CONTROLMBUF = 0x2000000 @@ -1089,7 +1130,10 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index 0a1c3e7e..d8be0451 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -899,6 +899,32 @@ const ( MAP_TRYFIXED = 0x400 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_DOOMED = 0x8000000 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_NOATIME = 0x8000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOSUID = 0x8 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SOFTDEP = 0x4000000 + MNT_SYNCHRONOUS = 0x2 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0x400ffff + MNT_WAIT = 0x1 + MNT_WANTRDWR = 0x2000000 + MNT_WXALLOWED = 0x800 MSG_BCAST = 0x100 MSG_CTRUNC = 0x20 MSG_DONTROUTE = 0x4 @@ -1218,6 +1244,34 @@ const ( SO_TIMESTAMP = 0x800 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + 
S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 TCIFLUSH = 0x1 TCIOFLUSH = 0x3 TCOFLUSH = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index acfc6646..12261a56 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -472,6 +472,7 @@ const ( F_GETLK = 0x7 F_GETOWN = 0x5 F_ISATTY = 0xb + F_OK = 0x0 F_RDLCK = 0x1 F_SETFD = 0x2 F_SETFL = 0x4 @@ -938,6 +939,32 @@ const ( MAP_TRYFIXED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_DOOMED = 0x8000000 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_NOATIME = 0x8000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOSUID = 0x8 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SOFTDEP = 0x4000000 + MNT_SYNCHRONOUS = 0x2 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0x400ffff + MNT_WAIT = 0x1 + MNT_WANTRDWR = 0x2000000 + MNT_WXALLOWED = 0x800 MSG_BCAST = 0x100 MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 @@ -1296,6 +1323,34 @@ const ( SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 SO_ZEROIZE = 0x2000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 TCIFLUSH = 0x1 TCIOFF = 0x3 TCIOFLUSH = 0x3 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 93e37c4b..79d5695c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -899,6 +899,32 @@ const ( MAP_TRYFIXED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_DOOMED = 0x8000000 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_NOATIME = 0x8000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOSUID = 0x8 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SOFTDEP = 0x4000000 + MNT_SYNCHRONOUS = 0x2 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0x400ffff + MNT_WAIT = 0x1 + MNT_WANTRDWR = 0x2000000 + MNT_WXALLOWED = 0x800 MSG_BCAST = 0x100 MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 @@ -1221,6 +1247,34 @@ const ( SO_TIMESTAMP = 0x800 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 TCIFLUSH = 0x1 TCIOFLUSH = 0x3 TCOFLUSH = 0x2 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index be42830c..22569db3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -996,6 +996,39 @@ const ( SO_USELOOPBACK = 0x40 SO_VRRP = 0x1017 SO_WROFF = 0x2 + S_ENFMT = 0x400 + S_IAMB = 0x1ff + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFDOOR = 0xd000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFNAM = 0x5000 + S_IFPORT = 0xe000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_INSEM = 0x1 + S_INSHD = 0x2 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 TAB0 = 0x0 TAB1 = 0x800 TAB2 = 0x1000 @@ -1102,6 +1135,8 @@ const ( TIOCSTOP = 0x746f TIOCSWINSZ = 0x5467 TOSTOP = 0x100 + UTIME_NOW = -0x1 + UTIME_OMIT = -0x2 VCEOF = 0x8 VCEOL = 0x9 VDISCARD = 0xd diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go new file mode 100644 index 00000000..ab2f7612 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -0,0 +1,1519 @@ +// mksyscall_aix.pl -aix -tags aix,ppc syscall_aix.go syscall_aix_ppc.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build aix,ppc + +package unix + +/* +#include +int utimes(uintptr_t, uintptr_t); +int utimensat(int, uintptr_t, uintptr_t, int); +int getcwd(uintptr_t, size_t); +int accept(int, uintptr_t, uintptr_t); +int getdirent(int, uintptr_t, size_t); +int wait4(int, uintptr_t, int, uintptr_t); +int ioctl(int, int, uintptr_t); +int fcntl(uintptr_t, int, uintptr_t); +int acct(uintptr_t); +int chdir(uintptr_t); +int chroot(uintptr_t); +int close(int); +int dup(int); +int dup3(int, int, int); +void exit(int); +int faccessat(int, uintptr_t, unsigned int, int); +int fallocate(int, unsigned int, long long, long long); +int fchdir(int); +int fchmod(int, unsigned int); +int fchmodat(int, uintptr_t, unsigned int, int); +int fchownat(int, uintptr_t, int, int, int); +int fdatasync(int); +int fsync(int); +int getpgid(int); +int getpgrp(); +int getpid(); +int getppid(); +int getpriority(int, int); +int getrusage(int, uintptr_t); +int getsid(int); +int kill(int, int); +int syslog(int, uintptr_t, size_t); +int mkdir(int, uintptr_t, unsigned int); +int mkdirat(int, uintptr_t, unsigned int); +int mkfifo(uintptr_t, unsigned int); +int mknod(uintptr_t, unsigned int, int); +int mknodat(int, uintptr_t, unsigned int, int); +int nanosleep(uintptr_t, uintptr_t); +int open64(uintptr_t, int, unsigned int); +int openat(int, uintptr_t, int, unsigned int); +int read(int, uintptr_t, size_t); +int readlink(uintptr_t, uintptr_t, size_t); +int removexattr(uintptr_t, uintptr_t); +int renameat(int, uintptr_t, int, uintptr_t); +int setdomainname(uintptr_t, size_t); +int sethostname(uintptr_t, size_t); +int setpgid(int, int); +int setsid(); +int settimeofday(uintptr_t); +int setuid(int); +int setgid(int); +int setpriority(int, int, int); +int statx(int, uintptr_t, int, int, uintptr_t); +int sync(); +long long tee(int, int, int, int); +uintptr_t times(uintptr_t); +int umask(int); +int uname(uintptr_t); +int unlink(uintptr_t); +int unlinkat(int, uintptr_t, int); +int unshare(int); +int ustat(int, uintptr_t); +int write(int, uintptr_t, 
size_t); +int dup2(int, int); +int posix_fadvise64(int, long long, long long, int); +int fchown(int, int, int); +int fstat(int, uintptr_t); +int fstatat(int, uintptr_t, uintptr_t, int); +int fstatfs(int, uintptr_t); +int ftruncate(int, long long); +int getegid(); +int geteuid(); +int getgid(); +int getuid(); +int lchown(uintptr_t, int, int); +int listen(int, int); +int lstat(uintptr_t, uintptr_t); +int pause(); +int pread64(int, uintptr_t, size_t, long long); +int pwrite64(int, uintptr_t, size_t, long long); +int pselect(int, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t); +int setregid(int, int); +int setreuid(int, int); +int shutdown(int, int); +long long splice(int, uintptr_t, int, uintptr_t, int, int); +int stat(uintptr_t, uintptr_t); +int statfs(uintptr_t, uintptr_t); +int truncate(uintptr_t, long long); +int bind(int, uintptr_t, uintptr_t); +int connect(int, uintptr_t, uintptr_t); +int getgroups(int, uintptr_t); +int setgroups(int, uintptr_t); +int getsockopt(int, int, int, uintptr_t, uintptr_t); +int setsockopt(int, int, int, uintptr_t, uintptr_t); +int socket(int, int, int); +int socketpair(int, int, int, uintptr_t); +int getpeername(int, uintptr_t, uintptr_t); +int getsockname(int, uintptr_t, uintptr_t); +int recvfrom(int, uintptr_t, size_t, int, uintptr_t, uintptr_t); +int sendto(int, uintptr_t, size_t, int, uintptr_t, uintptr_t); +int recvmsg(int, uintptr_t, int); +int sendmsg(int, uintptr_t, int); +int munmap(uintptr_t, uintptr_t); +int madvise(uintptr_t, size_t, int); +int mprotect(uintptr_t, size_t, int); +int mlock(uintptr_t, size_t); +int mlockall(int); +int msync(uintptr_t, size_t, int); +int munlock(uintptr_t, size_t); +int munlockall(); +int pipe(uintptr_t); +int pipe2(uintptr_t, int); +int poll(uintptr_t, int, int); +int gettimeofday(uintptr_t, uintptr_t); +int time(uintptr_t); +int utime(uintptr_t, uintptr_t); +int getrlimit64(int, uintptr_t); +int setrlimit64(int, uintptr_t); +long long lseek64(int, long long, int); +uintptr_t mmap(uintptr_t, uintptr_t, int, int, int, long long); + +*/ +import "C" +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.utimes(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(times)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.utimensat(C.int(dirfd), C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(times))), C.int(flag)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getcwd(buf []byte) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + var _p1 int + _p1 = len(buf) + r0, er := C.getcwd(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, er := C.accept(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(rsa))), C.uintptr_t(uintptr(unsafe.Pointer(addrlen)))) + fd = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
getdirent(fd int, buf []byte) (n int, err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + var _p1 int + _p1 = len(buf) + r0, er := C.getdirent(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, err error) { + r0, er := C.wait4(C.int(pid), C.uintptr_t(uintptr(unsafe.Pointer(status))), C.int(options), C.uintptr_t(uintptr(unsafe.Pointer(rusage)))) + wpid = Pid_t(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + r0, er := C.ioctl(C.int(fd), C.int(req), C.uintptr_t(arg)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func FcntlInt(fd uintptr, cmd int, arg int) (r int, err error) { + r0, er := C.fcntl(C.uintptr_t(fd), C.int(cmd), C.uintptr_t(arg)) + r = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) (err error) { + r0, er := C.fcntl(C.uintptr_t(fd), C.int(cmd), C.uintptr_t(uintptr(unsafe.Pointer(lk)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.acct(C.uintptr_t(_p0)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.chdir(C.uintptr_t(_p0)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.chroot(C.uintptr_t(_p0)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + r0, er := C.close(C.int(fd)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, er := C.dup(C.int(oldfd)) + fd = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + r0, er := C.dup3(C.int(oldfd), C.int(newfd), C.int(flags)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + C.exit(C.int(code)) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.faccessat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode), C.int(flags)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + r0, er := C.fallocate(C.int(fd), C.uint(mode), 
C.longlong(off), C.longlong(len)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + r0, er := C.fchdir(C.int(fd)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + r0, er := C.fchmod(C.int(fd), C.uint(mode)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.fchmodat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode), C.int(flags)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.fchownat(C.int(dirfd), C.uintptr_t(_p0), C.int(uid), C.int(gid), C.int(flags)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, er := C.fcntl(C.uintptr_t(fd), C.int(cmd), C.uintptr_t(arg)) + val = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + r0, er := C.fdatasync(C.int(fd)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + r0, er := C.fsync(C.int(fd)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, er := C.getpgid(C.int(pid)) + pgid = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pid int) { + r0, _ := C.getpgrp() + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _ := C.getpid() + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _ := C.getppid() + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, er := C.getpriority(C.int(which), C.int(who)) + prio = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + r0, er := C.getrusage(C.int(who), C.uintptr_t(uintptr(unsafe.Pointer(rusage)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, er := C.getsid(C.int(pid)) + sid = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + r0, er := C.kill(C.int(pid), C.int(sig)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + 
+func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + var _p1 int + _p1 = len(buf) + r0, er := C.syslog(C.int(typ), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(dirfd int, path string, mode uint32) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.mkdir(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.mkdirat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.mkfifo(C.uintptr_t(_p0), C.uint(mode)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.mknod(C.uintptr_t(_p0), C.uint(mode), C.int(dev)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.mknodat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode), C.int(dev)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + r0, er := C.nanosleep(C.uintptr_t(uintptr(unsafe.Pointer(time))), C.uintptr_t(uintptr(unsafe.Pointer(leftover)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.open64(C.uintptr_t(_p0), C.int(mode), C.uint(perm)) + fd = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.openat(C.int(dirfd), C.uintptr_t(_p0), C.int(flags), C.uint(mode)) + fd = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + var _p1 int + _p1 = len(p) + r0, er := C.read(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + var _p1 *byte + if len(buf) > 0 { + _p1 = &buf[0] + } + var _p2 int + _p2 = len(buf) + r0, er := C.readlink(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(_p1))), C.size_t(_p2)) + n = 
int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + _p1 := uintptr(unsafe.Pointer(C.CString(attr))) + r0, er := C.removexattr(C.uintptr_t(_p0), C.uintptr_t(_p1)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(oldpath))) + _p1 := uintptr(unsafe.Pointer(C.CString(newpath))) + r0, er := C.renameat(C.int(olddirfd), C.uintptr_t(_p0), C.int(newdirfd), C.uintptr_t(_p1)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + var _p1 int + _p1 = len(p) + r0, er := C.setdomainname(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + var _p1 int + _p1 = len(p) + r0, er := C.sethostname(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + r0, er := C.setpgid(C.int(pid), C.int(pgid)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, er := C.setsid() + pid = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + r0, er := C.settimeofday(C.uintptr_t(uintptr(unsafe.Pointer(tv)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + r0, er := C.setuid(C.int(uid)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(uid int) (err error) { + r0, er := C.setgid(C.int(uid)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + r0, er := C.setpriority(C.int(which), C.int(who), C.int(prio)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.statx(C.int(dirfd), C.uintptr_t(_p0), C.int(flags), C.int(mask), C.uintptr_t(uintptr(unsafe.Pointer(stat)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + C.sync() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, er := C.tee(C.int(rfd), C.int(wfd), C.int(len), C.int(flags)) + n = int64(r0) + if r0 == -1 && er != nil { + err 
= er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, er := C.times(C.uintptr_t(uintptr(unsafe.Pointer(tms)))) + ticks = uintptr(r0) + if uintptr(r0) == ^uintptr(0) && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _ := C.umask(C.int(mask)) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + r0, er := C.uname(C.uintptr_t(uintptr(unsafe.Pointer(buf)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.unlink(C.uintptr_t(_p0)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.unlinkat(C.int(dirfd), C.uintptr_t(_p0), C.int(flags)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + r0, er := C.unshare(C.int(flags)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + r0, er := C.ustat(C.int(dev), C.uintptr_t(uintptr(unsafe.Pointer(ubuf)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + var _p1 int + _p1 = len(p) + r0, er := C.write(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, er := C.read(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(p))), C.size_t(np)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, er := C.write(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(p))), C.size_t(np)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(oldfd int, newfd int) (err error) { + r0, er := C.dup2(C.int(oldfd), C.int(newfd)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + r0, er := C.posix_fadvise64(C.int(fd), C.longlong(offset), C.longlong(length), C.int(advice)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + r0, er := C.fchown(C.int(fd), C.int(uid), C.int(gid)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + r0, er := C.fstat(C.int(fd), 
C.uintptr_t(uintptr(unsafe.Pointer(stat)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.fstatat(C.int(dirfd), C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat))), C.int(flags)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + r0, er := C.fstatfs(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(buf)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + r0, er := C.ftruncate(C.int(fd), C.longlong(length)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := C.getegid() + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := C.geteuid() + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := C.getgid() + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := C.getuid() + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.lchown(C.uintptr_t(_p0), C.int(uid), C.int(gid)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + r0, er := C.listen(C.int(s), C.int(n)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.lstat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + r0, er := C.pause() + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + var _p1 int + _p1 = len(p) + r0, er := C.pread64(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1), C.longlong(offset)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + var _p1 int + _p1 = len(p) + r0, er := C.pwrite64(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1), C.longlong(offset)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, er := C.pselect(C.int(nfd), 
C.uintptr_t(uintptr(unsafe.Pointer(r))), C.uintptr_t(uintptr(unsafe.Pointer(w))), C.uintptr_t(uintptr(unsafe.Pointer(e))), C.uintptr_t(uintptr(unsafe.Pointer(timeout))), C.uintptr_t(uintptr(unsafe.Pointer(sigmask)))) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + r0, er := C.setregid(C.int(rgid), C.int(egid)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + r0, er := C.setreuid(C.int(ruid), C.int(euid)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + r0, er := C.shutdown(C.int(fd), C.int(how)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, er := C.splice(C.int(rfd), C.uintptr_t(uintptr(unsafe.Pointer(roff))), C.int(wfd), C.uintptr_t(uintptr(unsafe.Pointer(woff))), C.int(len), C.int(flags)) + n = int64(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.stat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.statfs(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(buf)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.truncate(C.uintptr_t(_p0), C.longlong(length)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + r0, er := C.bind(C.int(s), C.uintptr_t(uintptr(addr)), C.uintptr_t(uintptr(addrlen))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + r0, er := C.connect(C.int(s), C.uintptr_t(uintptr(addr)), C.uintptr_t(uintptr(addrlen))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, er := C.getgroups(C.int(n), C.uintptr_t(uintptr(unsafe.Pointer(list)))) + nn = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + r0, er := C.setgroups(C.int(n), C.uintptr_t(uintptr(unsafe.Pointer(list)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + r0, er 
:= C.getsockopt(C.int(s), C.int(level), C.int(name), C.uintptr_t(uintptr(val)), C.uintptr_t(uintptr(unsafe.Pointer(vallen)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + r0, er := C.setsockopt(C.int(s), C.int(level), C.int(name), C.uintptr_t(uintptr(val)), C.uintptr_t(vallen)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, er := C.socket(C.int(domain), C.int(typ), C.int(proto)) + fd = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + r0, er := C.socketpair(C.int(domain), C.int(typ), C.int(proto), C.uintptr_t(uintptr(unsafe.Pointer(fd)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + r0, er := C.getpeername(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(rsa))), C.uintptr_t(uintptr(unsafe.Pointer(addrlen)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + r0, er := C.getsockname(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(rsa))), C.uintptr_t(uintptr(unsafe.Pointer(addrlen)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + var _p1 int + _p1 = len(p) + r0, er := C.recvfrom(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1), C.int(flags), C.uintptr_t(uintptr(unsafe.Pointer(from))), C.uintptr_t(uintptr(unsafe.Pointer(fromlen)))) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + var _p1 int + _p1 = len(buf) + r0, er := C.sendto(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1), C.int(flags), C.uintptr_t(uintptr(to)), C.uintptr_t(uintptr(addrlen))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, er := C.recvmsg(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(msg))), C.int(flags)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, er := C.sendmsg(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(msg))), C.int(flags)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + r0, er := C.munmap(C.uintptr_t(addr), C.uintptr_t(length)) + if r0 == -1 && er != 
nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 int + _p1 = len(b) + r0, er := C.madvise(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1), C.int(advice)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 int + _p1 = len(b) + r0, er := C.mprotect(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1), C.int(prot)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 int + _p1 = len(b) + r0, er := C.mlock(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + r0, er := C.mlockall(C.int(flags)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 int + _p1 = len(b) + r0, er := C.msync(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1), C.int(flags)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 int + _p1 = len(b) + r0, er := C.munlock(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + r0, er := C.munlockall() + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + r0, er := C.pipe(C.uintptr_t(uintptr(unsafe.Pointer(p)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + r0, er := C.pipe2(C.uintptr_t(uintptr(unsafe.Pointer(p))), C.int(flags)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, er := C.poll(C.uintptr_t(uintptr(unsafe.Pointer(fds))), C.int(nfds), C.int(timeout)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func gettimeofday(tv *Timeval, tzp *Timezone) (err error) { + r0, er := C.gettimeofday(C.uintptr_t(uintptr(unsafe.Pointer(tv))), C.uintptr_t(uintptr(unsafe.Pointer(tzp)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Time(t *Time_t) (tt Time_t, err error) { + r0, er := C.time(C.uintptr_t(uintptr(unsafe.Pointer(t)))) + tt = Time_t(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, buf *Utimbuf) 
(err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.utime(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(buf)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + r0, er := C.getrlimit64(C.int(resource), C.uintptr_t(uintptr(unsafe.Pointer(rlim)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + r0, er := C.setrlimit64(C.int(resource), C.uintptr_t(uintptr(unsafe.Pointer(rlim)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, er := C.lseek64(C.int(fd), C.longlong(offset), C.int(whence)) + off = int64(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, er := C.mmap(C.uintptr_t(addr), C.uintptr_t(length), C.int(prot), C.int(flags), C.int(fd), C.longlong(offset)) + xaddr = uintptr(r0) + if uintptr(r0) == ^uintptr(0) && er != nil { + err = er + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go new file mode 100644 index 00000000..2e4f93fb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -0,0 +1,1519 @@ +// mksyscall_aix.pl -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build aix,ppc64 + +package unix + +/* +#include +int utimes(uintptr_t, uintptr_t); +int utimensat(int, uintptr_t, uintptr_t, int); +int getcwd(uintptr_t, size_t); +int accept(int, uintptr_t, uintptr_t); +int getdirent(int, uintptr_t, size_t); +int wait4(int, uintptr_t, int, uintptr_t); +int ioctl(int, int, uintptr_t); +int fcntl(uintptr_t, int, uintptr_t); +int acct(uintptr_t); +int chdir(uintptr_t); +int chroot(uintptr_t); +int close(int); +int dup(int); +int dup3(int, int, int); +void exit(int); +int faccessat(int, uintptr_t, unsigned int, int); +int fallocate(int, unsigned int, long long, long long); +int fchdir(int); +int fchmod(int, unsigned int); +int fchmodat(int, uintptr_t, unsigned int, int); +int fchownat(int, uintptr_t, int, int, int); +int fdatasync(int); +int fsync(int); +int getpgid(int); +int getpgrp(); +int getpid(); +int getppid(); +int getpriority(int, int); +int getrusage(int, uintptr_t); +int getsid(int); +int kill(int, int); +int syslog(int, uintptr_t, size_t); +int mkdir(int, uintptr_t, unsigned int); +int mkdirat(int, uintptr_t, unsigned int); +int mkfifo(uintptr_t, unsigned int); +int mknod(uintptr_t, unsigned int, int); +int mknodat(int, uintptr_t, unsigned int, int); +int nanosleep(uintptr_t, uintptr_t); +int open64(uintptr_t, int, unsigned int); +int openat(int, uintptr_t, int, unsigned int); +int read(int, uintptr_t, size_t); +int readlink(uintptr_t, uintptr_t, size_t); +int removexattr(uintptr_t, uintptr_t); +int renameat(int, uintptr_t, int, uintptr_t); +int setdomainname(uintptr_t, size_t); +int sethostname(uintptr_t, size_t); +int setpgid(int, int); +int setsid(); +int settimeofday(uintptr_t); +int setuid(int); +int setgid(int); +int setpriority(int, int, int); +int statx(int, uintptr_t, int, int, uintptr_t); +int sync(); +long long tee(int, int, int, int); +uintptr_t times(uintptr_t); +int umask(int); +int uname(uintptr_t); +int unlink(uintptr_t); +int unlinkat(int, uintptr_t, int); +int unshare(int); +int ustat(int, uintptr_t); +int write(int, uintptr_t, size_t); +int dup2(int, int); +int posix_fadvise64(int, long long, long long, int); +int fchown(int, int, int); +int fstat(int, uintptr_t); +int fstatat(int, uintptr_t, uintptr_t, int); +int fstatfs(int, uintptr_t); +int ftruncate(int, long long); +int getegid(); +int geteuid(); +int getgid(); +int getuid(); +int lchown(uintptr_t, int, int); +int listen(int, int); +int lstat(uintptr_t, uintptr_t); +int pause(); +int pread64(int, uintptr_t, size_t, long long); +int pwrite64(int, uintptr_t, size_t, long long); +int pselect(int, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t); +int setregid(int, int); +int setreuid(int, int); +int shutdown(int, int); +long long splice(int, uintptr_t, int, uintptr_t, int, int); +int stat(uintptr_t, uintptr_t); +int statfs(uintptr_t, uintptr_t); +int truncate(uintptr_t, long long); +int bind(int, uintptr_t, uintptr_t); +int connect(int, uintptr_t, uintptr_t); +int getgroups(int, uintptr_t); +int setgroups(int, uintptr_t); +int getsockopt(int, int, int, uintptr_t, uintptr_t); +int setsockopt(int, int, int, uintptr_t, uintptr_t); +int socket(int, int, int); +int socketpair(int, int, int, uintptr_t); +int getpeername(int, uintptr_t, uintptr_t); +int getsockname(int, uintptr_t, uintptr_t); +int recvfrom(int, uintptr_t, size_t, int, uintptr_t, uintptr_t); +int sendto(int, uintptr_t, size_t, int, uintptr_t, uintptr_t); +int recvmsg(int, uintptr_t, int); +int sendmsg(int, uintptr_t, int); +int munmap(uintptr_t, uintptr_t); +int madvise(uintptr_t, size_t, int); +int 
mprotect(uintptr_t, size_t, int); +int mlock(uintptr_t, size_t); +int mlockall(int); +int msync(uintptr_t, size_t, int); +int munlock(uintptr_t, size_t); +int munlockall(); +int pipe(uintptr_t); +int pipe2(uintptr_t, int); +int poll(uintptr_t, int, int); +int gettimeofday(uintptr_t, uintptr_t); +int time(uintptr_t); +int utime(uintptr_t, uintptr_t); +int getrlimit(int, uintptr_t); +int setrlimit(int, uintptr_t); +long long lseek(int, long long, int); +uintptr_t mmap64(uintptr_t, uintptr_t, int, int, int, long long); + +*/ +import "C" +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.utimes(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(times)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.utimensat(C.int(dirfd), C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(times))), C.int(flag)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getcwd(buf []byte) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + var _p1 int + _p1 = len(buf) + r0, er := C.getcwd(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, er := C.accept(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(rsa))), C.uintptr_t(uintptr(unsafe.Pointer(addrlen)))) + fd = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getdirent(fd int, buf []byte) (n int, err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + var _p1 int + _p1 = len(buf) + r0, er := C.getdirent(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, err error) { + r0, er := C.wait4(C.int(pid), C.uintptr_t(uintptr(unsafe.Pointer(status))), C.int(options), C.uintptr_t(uintptr(unsafe.Pointer(rusage)))) + wpid = Pid_t(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + r0, er := C.ioctl(C.int(fd), C.int(req), C.uintptr_t(arg)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func FcntlInt(fd uintptr, cmd int, arg int) (r int, err error) { + r0, er := C.fcntl(C.uintptr_t(fd), C.int(cmd), C.uintptr_t(arg)) + r = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) (err error) { + r0, er := C.fcntl(C.uintptr_t(fd), C.int(cmd), C.uintptr_t(uintptr(unsafe.Pointer(lk)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; 
DO NOT EDIT + +func Acct(path string) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.acct(C.uintptr_t(_p0)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.chdir(C.uintptr_t(_p0)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.chroot(C.uintptr_t(_p0)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + r0, er := C.close(C.int(fd)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, er := C.dup(C.int(oldfd)) + fd = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + r0, er := C.dup3(C.int(oldfd), C.int(newfd), C.int(flags)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + C.exit(C.int(code)) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.faccessat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode), C.int(flags)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + r0, er := C.fallocate(C.int(fd), C.uint(mode), C.longlong(off), C.longlong(len)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + r0, er := C.fchdir(C.int(fd)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + r0, er := C.fchmod(C.int(fd), C.uint(mode)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.fchmodat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode), C.int(flags)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.fchownat(C.int(dirfd), C.uintptr_t(_p0), C.int(uid), C.int(gid), C.int(flags)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, er := C.fcntl(C.uintptr_t(fd), C.int(cmd), C.uintptr_t(arg)) + val = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
Fdatasync(fd int) (err error) { + r0, er := C.fdatasync(C.int(fd)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + r0, er := C.fsync(C.int(fd)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, er := C.getpgid(C.int(pid)) + pgid = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pid int) { + r0, _ := C.getpgrp() + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _ := C.getpid() + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _ := C.getppid() + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, er := C.getpriority(C.int(which), C.int(who)) + prio = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + r0, er := C.getrusage(C.int(who), C.uintptr_t(uintptr(unsafe.Pointer(rusage)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, er := C.getsid(C.int(pid)) + sid = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + r0, er := C.kill(C.int(pid), C.int(sig)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + var _p1 int + _p1 = len(buf) + r0, er := C.syslog(C.int(typ), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(dirfd int, path string, mode uint32) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.mkdir(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.mkdirat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.mkfifo(C.uintptr_t(_p0), C.uint(mode)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.mknod(C.uintptr_t(_p0), C.uint(mode), C.int(dev)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.mknodat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode), C.int(dev)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + r0, er := C.nanosleep(C.uintptr_t(uintptr(unsafe.Pointer(time))), C.uintptr_t(uintptr(unsafe.Pointer(leftover)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.open64(C.uintptr_t(_p0), C.int(mode), C.uint(perm)) + fd = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.openat(C.int(dirfd), C.uintptr_t(_p0), C.int(flags), C.uint(mode)) + fd = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + var _p1 int + _p1 = len(p) + r0, er := C.read(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + var _p1 *byte + if len(buf) > 0 { + _p1 = &buf[0] + } + var _p2 int + _p2 = len(buf) + r0, er := C.readlink(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(_p1))), C.size_t(_p2)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + _p1 := uintptr(unsafe.Pointer(C.CString(attr))) + r0, er := C.removexattr(C.uintptr_t(_p0), C.uintptr_t(_p1)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(oldpath))) + _p1 := uintptr(unsafe.Pointer(C.CString(newpath))) + r0, er := C.renameat(C.int(olddirfd), C.uintptr_t(_p0), C.int(newdirfd), C.uintptr_t(_p1)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + var _p1 int + _p1 = len(p) + r0, er := C.setdomainname(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + var _p1 int + _p1 = len(p) + r0, er := C.sethostname(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + r0, er := C.setpgid(C.int(pid), C.int(pgid)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, er := C.setsid() + pid = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + r0, er := C.settimeofday(C.uintptr_t(uintptr(unsafe.Pointer(tv)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + r0, er := C.setuid(C.int(uid)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(uid int) (err error) { + r0, er := C.setgid(C.int(uid)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + r0, er := C.setpriority(C.int(which), C.int(who), C.int(prio)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.statx(C.int(dirfd), C.uintptr_t(_p0), C.int(flags), C.int(mask), C.uintptr_t(uintptr(unsafe.Pointer(stat)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + C.sync() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, er := C.tee(C.int(rfd), C.int(wfd), C.int(len), C.int(flags)) + n = int64(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, er := C.times(C.uintptr_t(uintptr(unsafe.Pointer(tms)))) + ticks = uintptr(r0) + if uintptr(r0) == ^uintptr(0) && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _ := C.umask(C.int(mask)) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + r0, er := C.uname(C.uintptr_t(uintptr(unsafe.Pointer(buf)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.unlink(C.uintptr_t(_p0)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.unlinkat(C.int(dirfd), C.uintptr_t(_p0), C.int(flags)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + r0, er := C.unshare(C.int(flags)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + 
+func Ustat(dev int, ubuf *Ustat_t) (err error) { + r0, er := C.ustat(C.int(dev), C.uintptr_t(uintptr(unsafe.Pointer(ubuf)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + var _p1 int + _p1 = len(p) + r0, er := C.write(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, er := C.read(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(p))), C.size_t(np)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, er := C.write(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(p))), C.size_t(np)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(oldfd int, newfd int) (err error) { + r0, er := C.dup2(C.int(oldfd), C.int(newfd)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + r0, er := C.posix_fadvise64(C.int(fd), C.longlong(offset), C.longlong(length), C.int(advice)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + r0, er := C.fchown(C.int(fd), C.int(uid), C.int(gid)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + r0, er := C.fstat(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(stat)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.fstatat(C.int(dirfd), C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat))), C.int(flags)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + r0, er := C.fstatfs(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(buf)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + r0, er := C.ftruncate(C.int(fd), C.longlong(length)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := C.getegid() + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := C.geteuid() + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := C.getgid() + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := C.getuid() + uid = int(r0) + 
return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.lchown(C.uintptr_t(_p0), C.int(uid), C.int(gid)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + r0, er := C.listen(C.int(s), C.int(n)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.lstat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + r0, er := C.pause() + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + var _p1 int + _p1 = len(p) + r0, er := C.pread64(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1), C.longlong(offset)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + var _p1 int + _p1 = len(p) + r0, er := C.pwrite64(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1), C.longlong(offset)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, er := C.pselect(C.int(nfd), C.uintptr_t(uintptr(unsafe.Pointer(r))), C.uintptr_t(uintptr(unsafe.Pointer(w))), C.uintptr_t(uintptr(unsafe.Pointer(e))), C.uintptr_t(uintptr(unsafe.Pointer(timeout))), C.uintptr_t(uintptr(unsafe.Pointer(sigmask)))) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + r0, er := C.setregid(C.int(rgid), C.int(egid)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + r0, er := C.setreuid(C.int(ruid), C.int(euid)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + r0, er := C.shutdown(C.int(fd), C.int(how)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, er := C.splice(C.int(rfd), C.uintptr_t(uintptr(unsafe.Pointer(roff))), C.int(wfd), C.uintptr_t(uintptr(unsafe.Pointer(woff))), C.int(len), C.int(flags)) + n = int64(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := 
C.stat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.statfs(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(buf)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.truncate(C.uintptr_t(_p0), C.longlong(length)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + r0, er := C.bind(C.int(s), C.uintptr_t(uintptr(addr)), C.uintptr_t(uintptr(addrlen))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + r0, er := C.connect(C.int(s), C.uintptr_t(uintptr(addr)), C.uintptr_t(uintptr(addrlen))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, er := C.getgroups(C.int(n), C.uintptr_t(uintptr(unsafe.Pointer(list)))) + nn = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + r0, er := C.setgroups(C.int(n), C.uintptr_t(uintptr(unsafe.Pointer(list)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + r0, er := C.getsockopt(C.int(s), C.int(level), C.int(name), C.uintptr_t(uintptr(val)), C.uintptr_t(uintptr(unsafe.Pointer(vallen)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + r0, er := C.setsockopt(C.int(s), C.int(level), C.int(name), C.uintptr_t(uintptr(val)), C.uintptr_t(vallen)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, er := C.socket(C.int(domain), C.int(typ), C.int(proto)) + fd = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + r0, er := C.socketpair(C.int(domain), C.int(typ), C.int(proto), C.uintptr_t(uintptr(unsafe.Pointer(fd)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + r0, er := C.getpeername(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(rsa))), C.uintptr_t(uintptr(unsafe.Pointer(addrlen)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa 
*RawSockaddrAny, addrlen *_Socklen) (err error) { + r0, er := C.getsockname(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(rsa))), C.uintptr_t(uintptr(unsafe.Pointer(addrlen)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + var _p1 int + _p1 = len(p) + r0, er := C.recvfrom(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1), C.int(flags), C.uintptr_t(uintptr(unsafe.Pointer(from))), C.uintptr_t(uintptr(unsafe.Pointer(fromlen)))) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + var _p1 int + _p1 = len(buf) + r0, er := C.sendto(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1), C.int(flags), C.uintptr_t(uintptr(to)), C.uintptr_t(uintptr(addrlen))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, er := C.recvmsg(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(msg))), C.int(flags)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, er := C.sendmsg(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(msg))), C.int(flags)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + r0, er := C.munmap(C.uintptr_t(addr), C.uintptr_t(length)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 int + _p1 = len(b) + r0, er := C.madvise(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1), C.int(advice)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 int + _p1 = len(b) + r0, er := C.mprotect(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1), C.int(prot)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 int + _p1 = len(b) + r0, er := C.mlock(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + r0, er := C.mlockall(C.int(flags)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 int + _p1 = len(b) + r0, er := C.msync(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), 
C.size_t(_p1), C.int(flags)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 int + _p1 = len(b) + r0, er := C.munlock(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + r0, er := C.munlockall() + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + r0, er := C.pipe(C.uintptr_t(uintptr(unsafe.Pointer(p)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + r0, er := C.pipe2(C.uintptr_t(uintptr(unsafe.Pointer(p))), C.int(flags)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, er := C.poll(C.uintptr_t(uintptr(unsafe.Pointer(fds))), C.int(nfds), C.int(timeout)) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func gettimeofday(tv *Timeval, tzp *Timezone) (err error) { + r0, er := C.gettimeofday(C.uintptr_t(uintptr(unsafe.Pointer(tv))), C.uintptr_t(uintptr(unsafe.Pointer(tzp)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Time(t *Time_t) (tt Time_t, err error) { + r0, er := C.time(C.uintptr_t(uintptr(unsafe.Pointer(t)))) + tt = Time_t(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, buf *Utimbuf) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(path))) + r0, er := C.utime(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(buf)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + r0, er := C.getrlimit(C.int(resource), C.uintptr_t(uintptr(unsafe.Pointer(rlim)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + r0, er := C.setrlimit(C.int(resource), C.uintptr_t(uintptr(unsafe.Pointer(rlim)))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, er := C.lseek(C.int(fd), C.longlong(offset), C.int(whence)) + off = int64(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, er := C.mmap64(C.uintptr_t(addr), C.uintptr_t(length), C.int(prot), C.int(flags), C.int(fd), C.longlong(offset)) + xaddr = uintptr(r0) + if uintptr(r0) == ^uintptr(0) && er != nil { + err = er + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go 
b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index ac02d4d8..9ce06df6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -420,6 +420,22 @@ func getxattr(path string, attr string, dest *byte, size int, position uint32, o // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -440,6 +456,21 @@ func setxattr(path string, attr string, data *byte, size int, position uint32, o // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func removexattr(path string, attr string, options int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -460,6 +491,21 @@ func removexattr(path string, attr string, options int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fremovexattr(fd int, attr string, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func listxattr(path string, dest *byte, size int, options int) (sz int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -476,6 +522,17 @@ func listxattr(path string, dest *byte, size int, options int) (sz int, err erro // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { + r0, _, e1 := Syscall6(SYS_FLISTXATTR, uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 1dd3cfa0..de992704 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -420,6 +420,22 @@ func getxattr(path string, attr string, dest *byte, size int, position uint32, o // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func 
fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -440,6 +456,21 @@ func setxattr(path string, attr string, data *byte, size int, position uint32, o // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func removexattr(path string, attr string, options int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -460,6 +491,21 @@ func removexattr(path string, attr string, options int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fremovexattr(fd int, attr string, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func listxattr(path string, dest *byte, size int, options int) (sz int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -476,6 +522,17 @@ func listxattr(path string, dest *byte, size int, options int) (sz int, err erro // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { + r0, _, e1 := Syscall6(SYS_FLISTXATTR, uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index cab46e74..81c4f093 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -420,6 +420,22 @@ func getxattr(path string, attr string, dest *byte, size int, position uint32, o // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) + sz = int(r0) + if e1 != 0 { + err 
= errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -440,6 +456,21 @@ func setxattr(path string, attr string, data *byte, size int, position uint32, o // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func removexattr(path string, attr string, options int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -460,6 +491,21 @@ func removexattr(path string, attr string, options int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fremovexattr(fd int, attr string, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func listxattr(path string, dest *byte, size int, options int) (sz int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -476,6 +522,17 @@ func listxattr(path string, dest *byte, size int, options int) (sz int, err erro // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { + r0, _, e1 := Syscall6(SYS_FLISTXATTR, uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 13478dd0..338c32d4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -420,6 +420,22 @@ func getxattr(path string, attr string, dest *byte, size int, position uint32, o // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -440,6 +456,21 @@ func setxattr(path string, attr string, data *byte, size int, position uint32, o // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func removexattr(path string, attr string, options int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -460,6 +491,21 @@ func removexattr(path string, attr string, options int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fremovexattr(fd int, attr string, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func listxattr(path string, dest *byte, size int, options int) (sz int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -476,6 +522,17 @@ func listxattr(path string, dest *byte, size int, options int) (sz int, err erro // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { + r0, _, e1 := Syscall6(SYS_FLISTXATTR, uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index a86434a7..ad77882b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -377,10 +377,8 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (r int, w int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - r = int(r0) - w = int(r1) +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 040e2f76..d3ba6c46 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -377,10 +377,8 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (r int, w int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - r = int(r0) - w = int(r1) +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index cddc5e86..9dfd77b6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ 
b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -377,10 +377,8 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (r int, w int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - r = int(r0) - w = int(r1) +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 433becfd..35b155a0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -442,6 +417,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGetres(clockid int32, res *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockGettime(clockid int32, time *Timespec) (err error) { _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { @@ -494,17 +479,6 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +518,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, 
mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) if e1 != 0 { @@ -625,6 +584,45 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -635,6 +633,42 @@ func Flock(fd int, how int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -955,6 +989,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdCreate(name string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1114,6 +1164,26 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) 
+ if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { var _p0 *byte _p0, err = BytePtrFromString(keyType) @@ -1366,16 +1436,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,6 +1595,21 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -1565,6 +1640,34 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fadvise(fd int, offset int64, length int64, advice int) (err error) { _, _, e1 := Syscall6(SYS_FADVISE64_64, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(length), uintptr(length>>32), uintptr(advice)) if e1 != 0 { @@ -1869,6 +1972,16 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func getgroups(n int, list *_Gid_t) (nn int, err error) { r0, _, e1 := RawSyscall(SYS_GETGROUPS32, uintptr(n), uintptr(unsafe.Pointer(list)), 0) nn = int(r0) @@ -1912,15 +2025,8 @@ func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, 
_, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1929,8 +2035,8 @@ func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pause() (err error) { - _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) +func getrlimit(resource int, rlim *rlimit32) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1939,8 +2045,8 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) +func setrlimit(resource int, rlim *rlimit32) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1949,8 +2055,13 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) if e1 != 0 { err = errnoErr(e1) } @@ -1995,6 +2106,21 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 33c02b26..11a30786 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := 
Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -442,6 +417,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGetres(clockid int32, res *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockGettime(clockid int32, time *Timespec) (err error) { _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { @@ -494,17 +479,6 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +518,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { @@ -625,6 +584,45 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -635,6 +633,42 @@ func Flock(fd int, how int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, 
_, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -955,6 +989,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdCreate(name string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1114,6 +1164,26 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { var _p0 *byte _p0, err = BytePtrFromString(keyType) @@ -1366,16 +1436,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,6 +1595,21 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { @@ -1545,6 +1620,17 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -1937,6 +2023,16 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) @@ -2127,6 +2223,21 @@ func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int6 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Utime(path string, buf *Utimbuf) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2142,6 +2253,21 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index f91b56c2..914f25f0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), 
uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -442,6 +417,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGetres(clockid int32, res *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockGettime(clockid int32, time *Timespec) (err error) { _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { @@ -494,17 +479,6 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +518,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) if e1 != 0 { @@ -625,6 +584,45 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -635,6 +633,42 @@ func Flock(fd int, how int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FREMOVEXATTR, 
uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -955,6 +989,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdCreate(name string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1114,6 +1164,26 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { var _p0 *byte _p0, err = BytePtrFromString(keyType) @@ -1366,16 +1436,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,6 +1595,21 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { @@ -1734,6 +1809,34 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN32, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -1852,6 +1955,16 @@ func Lstat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { r0, _, e1 := Syscall6(SYS_SENDFILE64, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) written = int(r0) @@ -1970,8 +2083,8 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1980,15 +2093,13 @@ func Gettimeofday(tv *Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) if e1 != 0 { err = errnoErr(e1) } @@ -1997,8 +2108,23 @@ func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pause() (err error) { - _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 52d75952..1d6c5562 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -442,6 +417,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGetres(clockid int32, res *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockGettime(clockid int32, time *Timespec) (err error) { _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { @@ -494,17 +479,6 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +518,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { @@ -625,6 +584,45 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, 
uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -635,6 +633,42 @@ func Flock(fd int, how int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -955,6 +989,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdCreate(name string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1114,6 +1164,26 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { var _p0 *byte _p0, err = BytePtrFromString(keyType) @@ -1366,16 +1436,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err 
error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,6 +1595,21 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 970a5c13..260631d1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -442,6 +417,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGetres(clockid int32, res *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockGettime(clockid int32, time *Timespec) (err error) { _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { @@ -494,17 +479,6 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +518,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off>>32), uintptr(off), uintptr(len>>32), uintptr(len)) if e1 != 0 { @@ -625,6 +584,45 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -635,6 +633,42 @@ func Flock(fd int, how int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -955,6 +989,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdCreate(name string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdirat(dirfd int, path string, mode uint32) (err error) { var 
_p0 *byte _p0, err = BytePtrFromString(path) @@ -1114,6 +1164,26 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { var _p0 *byte _p0, err = BytePtrFromString(keyType) @@ -1366,16 +1436,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,6 +1595,21 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { @@ -1545,6 +1620,34 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fadvise(fd int, offset int64, length int64, advice int) (err error) { _, _, e1 := Syscall9(SYS_FADVISE64, uintptr(fd), 0, uintptr(offset>>32), uintptr(offset), uintptr(length>>32), uintptr(length), uintptr(advice), 0, 0) if e1 != 0 { @@ -1794,6 +1897,16 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, 
err error) { r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) @@ -2004,6 +2117,21 @@ func Iopl(level int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { @@ -2025,13 +2153,13 @@ func Time(t *Time_t) (tt Time_t, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lstat(path string, stat *Stat_t) (err error) { +func Utime(path string, buf *Utimbuf) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -2040,8 +2168,13 @@ func Lstat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -2050,13 +2183,13 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { +func Lstat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -2065,13 +2198,8 @@ func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -2080,13 +2208,13 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Utime(path string, buf *Utimbuf) (err error) { +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(dirfd), 
uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -2095,15 +2223,13 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) + _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -2132,6 +2258,18 @@ func pipe2(p *[2]_C_int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pipe() (p1 int, p2 int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + p1 = int(r0) + p2 = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) { r0, _, e1 := Syscall6(SYS_MMAP2, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(pageOffset)) xaddr = uintptr(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index b989d0f2..ff2d84fb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -442,6 +417,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGetres(clockid int32, res *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockGettime(clockid int32, time *Timespec) (err error) { _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { @@ 
-494,17 +479,6 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +518,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { @@ -625,6 +584,45 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -635,6 +633,42 @@ func Flock(fd int, how int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -955,6 +989,22 @@ func Lsetxattr(path 
string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdCreate(name string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1114,6 +1164,26 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { var _p0 *byte _p0, err = BytePtrFromString(keyType) @@ -1366,16 +1436,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,6 +1595,21 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { @@ -1545,6 +1620,17 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -1881,6 +1967,16 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err 
error) { r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) @@ -2071,6 +2167,21 @@ func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int6 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { @@ -2096,6 +2207,21 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 1f8d14ca..48d14e60 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -442,6 +417,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGetres(clockid int32, res *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockGettime(clockid int32, time *Timespec) (err error) { _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { @@ -494,17 +479,6 
@@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +518,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { @@ -625,6 +584,45 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -635,6 +633,42 @@ func Flock(fd int, how int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -955,6 +989,22 @@ func Lsetxattr(path string, attr 
string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdCreate(name string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1114,6 +1164,26 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { var _p0 *byte _p0, err = BytePtrFromString(keyType) @@ -1366,16 +1436,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,6 +1595,21 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { @@ -1545,6 +1620,17 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -1881,6 +1967,16 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { r0, _, 
e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) @@ -2071,6 +2167,21 @@ func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int6 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { @@ -2096,6 +2207,21 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index a9c7e520..12c17a92 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -442,6 +417,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGetres(clockid int32, res *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockGettime(clockid int32, time *Timespec) (err error) { _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { @@ -494,17 +479,6 @@ func Dup3(oldfd int, 
newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +518,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) if e1 != 0 { @@ -625,6 +584,45 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -635,6 +633,42 @@ func Flock(fd int, how int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -955,6 +989,22 @@ func Lsetxattr(path string, 
attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdCreate(name string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1114,6 +1164,26 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { var _p0 *byte _p0, err = BytePtrFromString(keyType) @@ -1366,16 +1436,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,6 +1595,21 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { @@ -1545,6 +1620,34 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fadvise(fd int, offset int64, length int64, advice int) (err error) { _, _, e1 := Syscall9(SYS_FADVISE64, uintptr(fd), 0, uintptr(offset), 
uintptr(offset>>32), uintptr(length), uintptr(length>>32), uintptr(advice), 0, 0) if e1 != 0 { @@ -1794,6 +1897,16 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) @@ -2004,6 +2117,21 @@ func Iopl(level int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { @@ -2025,13 +2153,13 @@ func Time(t *Time_t) (tt Time_t, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lstat(path string, stat *Stat_t) (err error) { +func Utime(path string, buf *Utimbuf) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -2040,8 +2168,13 @@ func Lstat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -2050,13 +2183,13 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { +func Lstat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -2065,13 +2198,8 @@ func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), 
uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -2080,13 +2208,13 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Utime(path string, buf *Utimbuf) (err error) { +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -2095,15 +2223,13 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) + _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -2132,6 +2258,18 @@ func pipe2(p *[2]_C_int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pipe() (p1 int, p2 int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) + p1 = int(r0) + p2 = int(r1) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) { r0, _, e1 := Syscall6(SYS_MMAP2, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(pageOffset)) xaddr = uintptr(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 3bb9a209..8300814d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -442,6 
+417,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGetres(clockid int32, res *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockGettime(clockid int32, time *Timespec) (err error) { _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { @@ -494,17 +479,6 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +518,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { @@ -625,6 +584,45 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -635,6 +633,42 @@ func Flock(fd int, how int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -955,6 +989,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdCreate(name string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1114,6 +1164,26 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { var _p0 *byte _p0, err = BytePtrFromString(keyType) @@ -1366,16 +1436,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,15 +1595,13 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } @@ -1562,6 +1620,34 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fadvise(fd int, offset int64, length int64, advice int) (err error) { _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) if e1 != 0 { @@ -1938,8 +2024,13 @@ func Statfs(path string, buf *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { - _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1948,13 +2039,8 @@ func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Truncate(path string, length int64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -2153,6 +2239,21 @@ func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int6 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { @@ -2189,6 +2290,21 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -2217,3 +2333,13 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(flags), uintptr(off), uintptr(n), 0, 0) + if 
e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 56116623..002b4e17 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -442,6 +417,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGetres(clockid int32, res *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockGettime(clockid int32, time *Timespec) (err error) { _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { @@ -494,17 +479,6 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +518,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { @@ -625,6 +584,45 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -635,6 +633,42 @@ func Flock(fd int, how int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -955,6 +989,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdCreate(name string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1114,6 +1164,26 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { 
var _p0 *byte _p0, err = BytePtrFromString(keyType) @@ -1366,16 +1436,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,15 +1595,13 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } @@ -1562,6 +1620,34 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fadvise(fd int, offset int64, length int64, advice int) (err error) { _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) if e1 != 0 { @@ -1938,8 +2024,13 @@ func Statfs(path string, buf *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { - _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1948,13 +2039,8 @@ func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Truncate(path string, length int64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -2153,6 +2239,21 @@ func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int6 // THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { @@ -2189,6 +2290,21 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -2217,3 +2333,13 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(flags), uintptr(off), uintptr(n), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go new file mode 100644 index 00000000..542f3a3a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -0,0 +1,2140 @@ +// mksyscall.pl -tags linux,riscv64 syscall_linux.go syscall_linux_riscv64.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build linux,riscv64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlJoin(cmd int, arg2 string) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg2) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg3) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(arg4) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { + var _p0 unsafe.Pointer + if len(payload) > 0 { + _p0 = unsafe.Pointer(&payload[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := 
Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(payload) > 0 { + _p2 = unsafe.Pointer(&payload[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGetres(clockid int32, res *Timespec) (err 
error) { + _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Eventfd(initval uint, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, 
uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + 
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func MemfdCreate(name string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) 
+ } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { 
+ var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(callback) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT + +func Setns(fd int, nstype int) (err error) { + _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + SyscallNoError(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Syncfs(fd int) (err error) { + _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_PWAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) + 
gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsgid(gid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsuid(uid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := 
RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, 
msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 9696a019..1a9ba999 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -442,6 +417,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGetres(clockid int32, res *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockGettime(clockid int32, time *Timespec) (err error) { _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { @@ -494,17 +479,6 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func 
EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +518,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { @@ -625,6 +584,45 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -635,6 +633,42 @@ func Flock(fd int, how int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -955,6 +989,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdCreate(name string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err 
= errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1114,6 +1164,26 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { var _p0 *byte _p0, err = BytePtrFromString(keyType) @@ -1366,16 +1436,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,6 +1595,21 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { @@ -1545,6 +1620,17 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -1923,6 +2009,16 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func getgroups(n int, list *_Gid_t) (nn int, err error) { r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) nn = int(r0) @@ -1944,6 +2040,21 @@ func setgroups(n int, list *_Gid_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := 
Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { @@ -1969,6 +2080,21 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index c01b3b6b..b26aee95 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -14,6 +14,31 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) @@ -118,13 +143,13 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -133,13 +158,70 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), 
uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlJoin(cmd int, arg2 string) (ret int, err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(arg2) if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) + ret = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -148,8 +230,19 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) +func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg3) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(arg4) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) + ret = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -158,15 +251,14 @@ func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getcwd(buf []byte) (n int, err error) { +func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) + if len(payload) > 0 { + _p0 = unsafe.Pointer(&payload[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) if e1 != 0 { err = errnoErr(e1) } @@ -175,9 +267,15 @@ func Getcwd(buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) +func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = 
unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) + ret = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -251,6 +349,33 @@ func Acct(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(payload) > 0 { + _p2 = unsafe.Pointer(&payload[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Adjtimex(buf *Timex) (state int, err error) { r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) state = int(r0) @@ -344,8 +469,8 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -355,9 +480,8 @@ func EpollCreate(size int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -366,8 +490,9 @@ func EpollCreate1(flag int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) +func Eventfd(initval uint, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) + fd = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -377,7 +502,7 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) return } @@ -428,21 +553,6 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -528,7 
+638,7 @@ func Getpgid(pid int) (pgid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) pid = int(r0) return } @@ -536,7 +646,7 @@ func Getpid() (pid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) ppid = int(r0) return } @@ -593,7 +703,7 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettid() (tid int) { - r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) tid = int(r0) return } @@ -692,6 +802,33 @@ func Klogctl(typ int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Listxattr(path string, dest []byte) (sz int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -714,6 +851,74 @@ func Listxattr(path string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = 
BytePtrFromString(path) @@ -754,6 +959,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -794,6 +1010,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -851,6 +1078,32 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(callback) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setdomainname(p []byte) (err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -960,8 +1213,33 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { - Syscall(SYS_SYNC, 0, 0, 0) + SyscallNoError(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Syncfs(fd int) (err error) { + _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1010,7 +1288,7 @@ func Times(tms *Tms) (ticks uintptr, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) oldmask = int(r0) return } @@ -1052,16 +1330,6 @@ func 
Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1169,14 +1437,24 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } @@ -1185,8 +1463,14 @@ func Munlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1262,6 +1546,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, buf *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) if e1 != 0 { @@ -1283,7 +1582,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) egid = int(r0) return } @@ -1291,7 +1590,7 @@ func Getegid() (egid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (euid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) euid = int(r0) return } @@ -1299,7 +1598,7 @@ func Geteuid() (euid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) gid = int(r0) return } @@ -1317,7 +1616,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) uid = int(r0) return } @@ -1788,6 +2087,21 @@ func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int6 // THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { @@ -1813,6 +2127,21 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index fb4b9627..f1874d5a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -571,6 +571,235 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FD, uintptr(fd), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = 
BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + 
return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fadvise(fd int, offset int64, length int64, advice int) (err error) { _, _, e1 := Syscall9(SYS_POSIX_FADVISE, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), 0, uintptr(length), uintptr(length>>32), uintptr(advice), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index beac82ef..eb802839 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -571,6 +571,235 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FD, uintptr(fd), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = 
BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + 
return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fadvise(fd int, offset int64, length int64, advice int) (err error) { _, _, e1 := Syscall6(SYS_POSIX_FADVISE, uintptr(fd), 0, uintptr(offset), 0, uintptr(length), uintptr(advice)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 7bd5f60b..7b36499d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -571,6 +571,235 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FD, uintptr(fd), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := 
Syscall6(SYS_EXTATTR_GET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT + +func ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fadvise(fd int, offset int64, length int64, advice int) (err error) { _, _, e1 := Syscall9(SYS_POSIX_FADVISE, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), 0, uintptr(length), uintptr(length>>32), uintptr(advice), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 49b3b5e8..1942049b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -569,6 +569,21 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index c4c7d854..d351c72c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -569,6 +569,21 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 210285b0..617d47f0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -569,6 +569,21 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + 
if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 39789630..e2e5fc5e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -41,6 +41,7 @@ import ( //go:cgo_import_dynamic libc_dup dup "libc.so" //go:cgo_import_dynamic libc_dup2 dup2 "libc.so" //go:cgo_import_dynamic libc_exit exit "libc.so" +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" //go:cgo_import_dynamic libc_fchdir fchdir "libc.so" //go:cgo_import_dynamic libc_fchmod fchmod "libc.so" //go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" @@ -169,6 +170,7 @@ import ( //go:linkname procDup libc_dup //go:linkname procDup2 libc_dup2 //go:linkname procExit libc_exit +//go:linkname procFaccessat libc_faccessat //go:linkname procFchdir libc_fchdir //go:linkname procFchmod libc_fchmod //go:linkname procFchmodat libc_fchmodat @@ -298,6 +300,7 @@ var ( procDup, procDup2, procExit, + procFaccessat, procFchdir, procFchmod, procFchmodat, @@ -695,6 +698,19 @@ func Exit(code int) { return } +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFaccessat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + func Fchdir(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchdir)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 83bb935b..b005031a 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -1,5 +1,5 @@ // mksysctl_openbsd.pl -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index 207b2793..90c95c2c 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -1,5 +1,5 @@ // mksysctl_openbsd.pl -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. // +build amd64,openbsd diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index 83bb935b..b005031a 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -1,5 +1,5 @@ // mksysctl_openbsd.pl -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. 
package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index b64a8122..1ab8780c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -7,347 +7,397 @@ package unix const ( // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int - SYS_EXIT = 1 // { void sys_exit(int rval); } exit \ - SYS_FORK = 2 // { int fork(void); } - SYS_READ = 3 // { ssize_t read(int fd, void *buf, \ - SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \ - SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } - SYS_CLOSE = 6 // { int close(int fd); } - SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \ - SYS_LINK = 9 // { int link(char *path, char *link); } - SYS_UNLINK = 10 // { int unlink(char *path); } - SYS_CHDIR = 12 // { int chdir(char *path); } - SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } - SYS_CHMOD = 15 // { int chmod(char *path, int mode); } - SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break \ - SYS_GETPID = 20 // { pid_t getpid(void); } - SYS_MOUNT = 21 // { int mount(char *type, char *path, \ - SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } - SYS_SETUID = 23 // { int setuid(uid_t uid); } - SYS_GETUID = 24 // { uid_t getuid(void); } - SYS_GETEUID = 25 // { uid_t geteuid(void); } - SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \ - SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \ - SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \ - SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \ - SYS_ACCEPT = 30 // { int accept(int s, \ - SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \ - SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \ - SYS_ACCESS = 33 // { int access(char *path, int amode); } - SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } - SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } - SYS_SYNC = 36 // { int sync(void); } - SYS_KILL = 37 // { int kill(int pid, int signum); } - SYS_GETPPID = 39 // { pid_t getppid(void); } - SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } - SYS_GETEGID = 43 // { gid_t getegid(void); } - SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \ - SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \ - SYS_GETGID = 47 // { gid_t getgid(void); } - SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \ - SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } - SYS_ACCT = 51 // { int acct(char *path); } - SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \ - SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \ - SYS_REBOOT = 55 // { int reboot(int opt); } - SYS_REVOKE = 56 // { int revoke(char *path); } - SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } - SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \ - SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \ - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \ - SYS_CHROOT = 61 // { int chroot(char *path); } - SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \ - SYS_VFORK = 66 // { int vfork(void); } - SYS_SBRK = 69 // { int sbrk(int incr); } - SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \ - SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int 
mprotect(const void *addr, size_t len, \ - SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \ - SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ - SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \ - SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \ - SYS_GETPGRP = 81 // { int getpgrp(void); } - SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } - SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \ - SYS_SWAPON = 85 // { int swapon(char *name); } - SYS_GETITIMER = 86 // { int getitimer(u_int which, \ - SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } - SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } - SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } - SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \ - SYS_FSYNC = 95 // { int fsync(int fd); } - SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \ - SYS_SOCKET = 97 // { int socket(int domain, int type, \ - SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \ - SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } - SYS_BIND = 104 // { int bind(int s, caddr_t name, \ - SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ - SYS_LISTEN = 106 // { int listen(int s, int backlog); } - SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ - SYS_GETRUSAGE = 117 // { int getrusage(int who, \ - SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ - SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \ - SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ - SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ - SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } - SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } - SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } - SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } - SYS_RENAME = 128 // { int rename(char *from, char *to); } - SYS_FLOCK = 131 // { int flock(int fd, int how); } - SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } - SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \ - SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } - SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \ - SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } - SYS_RMDIR = 137 // { int rmdir(char *path); } - SYS_UTIMES = 138 // { int utimes(char *path, \ - SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ - SYS_SETSID = 147 // { int setsid(void); } - SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ - SYS_LGETFH = 160 // { int lgetfh(char *fname, \ - SYS_GETFH = 161 // { int getfh(char *fname, \ - SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } - SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ - SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, \ - SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, \ - SYS_SETFIB = 175 // { int setfib(int fibnum); } - SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } - SYS_SETGID = 181 // { int setgid(gid_t gid); } - SYS_SETEGID = 182 // { int setegid(gid_t egid); } - SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } - SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } - SYS_FPATHCONF = 192 // { int fpathconf(int fd, int 
name); } - SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ - SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \ - SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, \ - SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, \ - SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, \ - SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, \ - SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \ - SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } - SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } - SYS_UNDELETE = 205 // { int undelete(char *path); } - SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } - SYS_GETPGID = 207 // { int getpgid(pid_t pid); } - SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ - SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ - SYS_CLOCK_SETTIME = 233 // { int clock_settime( \ - SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ - SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \ - SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } - SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \ - SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \ - SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } - SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ - SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \ - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \ - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\ - SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } - SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \ - SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \ - SYS_ISSETUGID = 253 // { int issetugid(void); } - SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \ - SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } - SYS_LUTIMES = 276 // { int lutimes(char *path, \ - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } - SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \ - SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \ - SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \ - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \ - SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 301 // { int modstat(int modid, \ - SYS_MODFNEXT = 302 // { int modfnext(int modid); } - SYS_MODFIND = 303 // { int modfind(const char *name); } - SYS_KLDLOAD = 304 // { int kldload(const char *file); } - SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } - SYS_KLDFIND = 306 // { int kldfind(const char *file); } - SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \ - SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } - SYS_GETSID = 310 // { int getsid(pid_t pid); } - SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \ - SYS_SETRESGID = 312 // { 
int setresgid(gid_t rgid, gid_t egid, \ - SYS_YIELD = 321 // { int yield(void); } - SYS_MLOCKALL = 324 // { int mlockall(int how); } - SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } - SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \ - SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \ - SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \ - SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } - SYS_SCHED_YIELD = 331 // { int sched_yield (void); } - SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } - SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } - SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \ - SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } - SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \ - SYS_JAIL = 338 // { int jail(struct jail *jail); } - SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \ - SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } - SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } - SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \ - SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \ - SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ - SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ - SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ - SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ - SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ - SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ - SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ - SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \ - SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \ - SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ - SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ - SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ - SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, \ - SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ - SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ - SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ - SYS___SETUGID = 374 // { int __setugid(int flag); } - SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } - SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ - SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } - SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } - SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ - SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ - SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ - SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \ - SYS_KENV = 390 // { int kenv(int what, const char *name, \ - SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ - SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ - SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ - SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \ - SYS_GETFSSTAT = 395 // { int getfsstat(struct 
statfs *buf, \ - SYS_STATFS = 396 // { int statfs(char *path, \ - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ - SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ - SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ - SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \ - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ - SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ - SYS_SIGACTION = 416 // { int sigaction(int sig, \ - SYS_SIGRETURN = 417 // { int sigreturn( \ - SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( \ - SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ - SYS_SWAPOFF = 424 // { int swapoff(const char *name); } - SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ - SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ - SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ - SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ - SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \ - SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \ - SYS_THR_EXIT = 431 // { void thr_exit(long *state); } - SYS_THR_SELF = 432 // { int thr_self(long *id); } - SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } - SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } - SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } - SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } - SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ - SYS_THR_SUSPEND = 442 // { int thr_suspend( \ - SYS_THR_WAKE = 443 // { int thr_wake(long id); } - SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } - SYS_AUDIT = 445 // { int audit(const void *record, \ - SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ - SYS_GETAUID = 447 // { int getauid(uid_t *auid); } - SYS_SETAUID = 448 // { int setauid(uid_t *auid); } - SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } - SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \ - SYS_AUDITCTL = 453 // { int auditctl(char *path); } - SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ - SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ - SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } - SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } - SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } - SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ - SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ - SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ - SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ - SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \ - SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } - SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } - SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, 
int sig); } - SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ - SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } - SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } - SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ - SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ - SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \ - SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \ - SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \ - SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \ - SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ - SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ - SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ - SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \ - SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } - SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ - SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \ - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ - SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ - SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ - SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } - SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } - SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ - SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \ - SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } - SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } - SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } - SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, \ - SYS_CAP_ENTER = 516 // { int cap_enter(void); } - SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } - SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } - SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } - SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } - SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ - SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ - SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } - SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ - SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ - SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ - SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \ - SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \ - SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \ - SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \ - SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \ - SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, \ - SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, \ - SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, \ - SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, \ - SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, \ - SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \ - SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \ - SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char 
*path, \ - SYS_ACCEPT4 = 541 // { int accept4(int s, \ - SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } - SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \ - SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \ - SYS_FUTIMENS = 546 // { int futimens(int fd, \ - SYS_UTIMENSAT = 547 // { int utimensat(int fd, \ + SYS_EXIT = 1 // { void sys_exit(int rval); } exit \ + SYS_FORK = 2 // { int fork(void); } + SYS_READ = 3 // { ssize_t read(int fd, void *buf, \ + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \ + SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } + SYS_CLOSE = 6 // { int close(int fd); } + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \ + SYS_LINK = 9 // { int link(char *path, char *link); } + SYS_UNLINK = 10 // { int unlink(char *path); } + SYS_CHDIR = 12 // { int chdir(char *path); } + SYS_FCHDIR = 13 // { int fchdir(int fd); } + SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } + SYS_CHMOD = 15 // { int chmod(char *path, int mode); } + SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } + SYS_OBREAK = 17 // { int obreak(char *nsize); } break \ + SYS_GETPID = 20 // { pid_t getpid(void); } + SYS_MOUNT = 21 // { int mount(char *type, char *path, \ + SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } + SYS_SETUID = 23 // { int setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t getuid(void); } + SYS_GETEUID = 25 // { uid_t geteuid(void); } + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \ + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \ + SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \ + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \ + SYS_ACCEPT = 30 // { int accept(int s, \ + SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \ + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \ + SYS_ACCESS = 33 // { int access(char *path, int amode); } + SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } + SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } + SYS_SYNC = 36 // { int sync(void); } + SYS_KILL = 37 // { int kill(int pid, int signum); } + SYS_GETPPID = 39 // { pid_t getppid(void); } + SYS_DUP = 41 // { int dup(u_int fd); } + SYS_PIPE = 42 // { int pipe(void); } + SYS_GETEGID = 43 // { gid_t getegid(void); } + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \ + SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \ + SYS_GETGID = 47 // { gid_t getgid(void); } + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \ + SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } + SYS_ACCT = 51 // { int acct(char *path); } + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \ + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \ + SYS_REBOOT = 55 // { int reboot(int opt); } + SYS_REVOKE = 56 // { int revoke(char *path); } + SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } + SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \ + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \ + SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \ + SYS_CHROOT = 61 // { int chroot(char *path); } + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \ + SYS_VFORK = 66 // { int vfork(void); } + SYS_SBRK = 69 // { int sbrk(int incr); } + SYS_SSTK = 70 // { int sstk(int incr); } + SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \ + SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, 
\
+	SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \
+	SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \
+	SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \
+	SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \
+	SYS_GETPGRP = 81 // { int getpgrp(void); }
+	SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); }
+	SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \
+	SYS_SWAPON = 85 // { int swapon(char *name); }
+	SYS_GETITIMER = 86 // { int getitimer(u_int which, \
+	SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); }
+	SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); }
+	SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); }
+	SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \
+	SYS_FSYNC = 95 // { int fsync(int fd); }
+	SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \
+	SYS_SOCKET = 97 // { int socket(int domain, int type, \
+	SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \
+	SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); }
+	SYS_BIND = 104 // { int bind(int s, caddr_t name, \
+	SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \
+	SYS_LISTEN = 106 // { int listen(int s, int backlog); }
+	SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \
+	SYS_GETRUSAGE = 117 // { int getrusage(int who, \
+	SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \
+	SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \
+	SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \
+	SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \
+	SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); }
+	SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); }
+	SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); }
+	SYS_SETREGID = 127 // { int setregid(int rgid, int egid); }
+	SYS_RENAME = 128 // { int rename(char *from, char *to); }
+	SYS_FLOCK = 131 // { int flock(int fd, int how); }
+	SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); }
+	SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \
+	SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); }
+	SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \
+	SYS_MKDIR = 136 // { int mkdir(char *path, int mode); }
+	SYS_RMDIR = 137 // { int rmdir(char *path); }
+	SYS_UTIMES = 138 // { int utimes(char *path, \
+	SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \
+	SYS_SETSID = 147 // { int setsid(void); }
+	SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \
+	SYS_NLM_SYSCALL = 154 // { int nlm_syscall(int debug_level, int grace_period, int addr_count, char **addrs); }
+	SYS_NFSSVC = 155 // { int nfssvc(int flag, caddr_t argp); }
+	SYS_LGETFH = 160 // { int lgetfh(char *fname, \
+	SYS_GETFH = 161 // { int getfh(char *fname, \
+	SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); }
+	SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \
+	SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, \
+	SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, \
+	SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, \
+	SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, \
+	SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, \
+	SYS_SETFIB = 175 // { int setfib(int fibnum); }
+	SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); }
+	SYS_SETGID = 181 // { int setgid(gid_t gid); }
+	SYS_SETEGID = 182 // { int setegid(gid_t egid); }
+	SYS_SETEUID = 183 // { int
seteuid(uid_t euid); } + SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } + SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } + SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } + SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } + SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ + SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \ + SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, \ + SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, \ + SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, \ + SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, \ + SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \ + SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } + SYS_UNDELETE = 205 // { int undelete(char *path); } + SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } + SYS_GETPGID = 207 // { int getpgid(pid_t pid); } + SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ + SYS_SEMGET = 221 // { int semget(key_t key, int nsems, \ + SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, \ + SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, \ + SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, \ + SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, \ + SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } + SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, \ + SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ + SYS_CLOCK_SETTIME = 233 // { int clock_settime( \ + SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ + SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \ + SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } + SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \ + SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \ + SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } + SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ + SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \ + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \ + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\ + SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } + SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \ + SYS_RFORK = 251 // { int rfork(int flags); } + SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \ + SYS_ISSETUGID = 253 // { int issetugid(void); } + SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } + SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } + SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, \ + SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \ + SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } + SYS_LUTIMES = 276 // { int lutimes(char *path, \ + SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } + SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } + SYS_NLSTAT = 280 // { int 
nlstat(char *path, struct nstat *ub); } + SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \ + SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \ + SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \ + SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \ + SYS_MODNEXT = 300 // { int modnext(int modid); } + SYS_MODSTAT = 301 // { int modstat(int modid, \ + SYS_MODFNEXT = 302 // { int modfnext(int modid); } + SYS_MODFIND = 303 // { int modfind(const char *name); } + SYS_KLDLOAD = 304 // { int kldload(const char *file); } + SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } + SYS_KLDFIND = 306 // { int kldfind(const char *file); } + SYS_KLDNEXT = 307 // { int kldnext(int fileid); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \ + SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } + SYS_GETSID = 310 // { int getsid(pid_t pid); } + SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \ + SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \ + SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend( \ + SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, \ + SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } + SYS_OAIO_READ = 318 // { int oaio_read(struct oaiocb *aiocbp); } + SYS_OAIO_WRITE = 319 // { int oaio_write(struct oaiocb *aiocbp); } + SYS_OLIO_LISTIO = 320 // { int olio_listio(int mode, \ + SYS_YIELD = 321 // { int yield(void); } + SYS_MLOCKALL = 324 // { int mlockall(int how); } + SYS_MUNLOCKALL = 325 // { int munlockall(void); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \ + SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \ + SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \ + SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } + SYS_SCHED_YIELD = 331 // { int sched_yield (void); } + SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } + SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } + SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \ + SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } + SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \ + SYS_JAIL = 338 // { int jail(struct jail *jail); } + SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \ + SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } + SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } + SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \ + SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \ + SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ + SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ + SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ + SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ + SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ + SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ + SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ + SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \ + SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \ + 
SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ + SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete( \ + SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ + SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ + SYS_KQUEUE = 362 // { int kqueue(void); } + SYS_KEVENT = 363 // { int kevent(int fd, \ + SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ + SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ + SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ + SYS___SETUGID = 374 // { int __setugid(int flag); } + SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } + SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ + SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } + SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } + SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ + SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ + SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ + SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \ + SYS_KENV = 390 // { int kenv(int what, const char *name, \ + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ + SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ + SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ + SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \ + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \ + SYS_STATFS = 396 // { int statfs(char *path, \ + SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ + SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } + SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } + SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } + SYS_KSEM_TRYWAIT = 403 // { int ksem_trywait(semid_t id); } + SYS_KSEM_INIT = 404 // { int ksem_init(semid_t *idp, \ + SYS_KSEM_OPEN = 405 // { int ksem_open(semid_t *idp, \ + SYS_KSEM_UNLINK = 406 // { int ksem_unlink(const char *name); } + SYS_KSEM_GETVALUE = 407 // { int ksem_getvalue(semid_t id, int *val); } + SYS_KSEM_DESTROY = 408 // { int ksem_destroy(semid_t id); } + SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ + SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ + SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \ + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ + SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ + SYS_SIGACTION = 416 // { int sigaction(int sig, \ + SYS_SIGRETURN = 417 // { int sigreturn( \ + SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext( \ + SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ + SYS_SWAPOFF = 424 // { int swapoff(const char *name); } + SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ + SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ + SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ + SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ + SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \ + SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \ + SYS_THR_EXIT = 431 // { void thr_exit(long *state); 
} + SYS_THR_SELF = 432 // { int thr_self(long *id); } + SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } + SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } + SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } + SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } + SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ + SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, \ + SYS_THR_SUSPEND = 442 // { int thr_suspend( \ + SYS_THR_WAKE = 443 // { int thr_wake(long id); } + SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } + SYS_AUDIT = 445 // { int audit(const void *record, \ + SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ + SYS_GETAUID = 447 // { int getauid(uid_t *auid); } + SYS_SETAUID = 448 // { int setauid(uid_t *auid); } + SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } + SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \ + SYS_AUDITCTL = 453 // { int auditctl(char *path); } + SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ + SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ + SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } + SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, \ + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, \ + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, \ + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, \ + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, \ + SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } + SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } + SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } + SYS_AIO_FSYNC = 465 // { int aio_fsync(int op, struct aiocb *aiocbp); } + SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ + SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } + SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \ + SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \ + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \ + SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ + SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ + SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ + SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \ + SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } + SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } + SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } + SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ + SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } + SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } + SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ + SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ + SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \ + SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \ + SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \ + SYS_FCHMODAT = 490 // { int 
fchmodat(int fd, char *path, mode_t mode, \ + SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ + SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ + SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ + SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ + SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \ + SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } + SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } + SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ + SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \ + SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ + SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ + SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ + SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } + SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } + SYS_GSSD_SYSCALL = 505 // { int gssd_syscall(char *path); } + SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ + SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \ + SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } + SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } + SYS___SEMCTL = 510 // { int __semctl(int semid, int semnum, \ + SYS_MSGCTL = 511 // { int msgctl(int msqid, int cmd, \ + SYS_SHMCTL = 512 // { int shmctl(int shmid, int cmd, \ + SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } + SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, \ + SYS_CAP_ENTER = 516 // { int cap_enter(void); } + SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } + SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } + SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } + SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } + SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ + SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ + SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } + SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ + SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ + SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ + SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \ + SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \ + SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \ + SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \ + SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \ + SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, \ + SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, \ + SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, \ + SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, \ + SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, \ + SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \ + SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \ + SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \ + SYS_ACCEPT4 = 541 // { int accept4(int s, \ + SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } + SYS_AIO_MLOCK = 543 // { int aio_mlock(struct aiocb *aiocbp); } + SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \ + SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \ + SYS_FUTIMENS = 546 // { int futimens(int fd, \ + 
SYS_UTIMENSAT = 547 // { int utimensat(int fd, \
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go
index 81722ac9..b66f900d 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go
@@ -7,347 +7,397 @@ package unix
 const (
 	// SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int
-	SYS_EXIT = 1 // { void sys_exit(int rval); } exit \
-	SYS_FORK = 2 // { int fork(void); }
-	SYS_READ = 3 // { ssize_t read(int fd, void *buf, \
-	SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \
-	SYS_OPEN = 5 // { int open(char *path, int flags, int mode); }
-	SYS_CLOSE = 6 // { int close(int fd); }
-	SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \
-	SYS_LINK = 9 // { int link(char *path, char *link); }
-	SYS_UNLINK = 10 // { int unlink(char *path); }
-	SYS_CHDIR = 12 // { int chdir(char *path); }
-	SYS_FCHDIR = 13 // { int fchdir(int fd); }
-	SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); }
-	SYS_CHMOD = 15 // { int chmod(char *path, int mode); }
-	SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); }
-	SYS_OBREAK = 17 // { int obreak(char *nsize); } break \
-	SYS_GETPID = 20 // { pid_t getpid(void); }
-	SYS_MOUNT = 21 // { int mount(char *type, char *path, \
-	SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); }
-	SYS_SETUID = 23 // { int setuid(uid_t uid); }
-	SYS_GETUID = 24 // { uid_t getuid(void); }
-	SYS_GETEUID = 25 // { uid_t geteuid(void); }
-	SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \
-	SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \
-	SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \
-	SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \
-	SYS_ACCEPT = 30 // { int accept(int s, \
-	SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \
-	SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \
-	SYS_ACCESS = 33 // { int access(char *path, int amode); }
-	SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); }
-	SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); }
-	SYS_SYNC = 36 // { int sync(void); }
-	SYS_KILL = 37 // { int kill(int pid, int signum); }
-	SYS_GETPPID = 39 // { pid_t getppid(void); }
-	SYS_DUP = 41 // { int dup(u_int fd); }
-	SYS_PIPE = 42 // { int pipe(void); }
-	SYS_GETEGID = 43 // { gid_t getegid(void); }
-	SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \
-	SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \
-	SYS_GETGID = 47 // { gid_t getgid(void); }
-	SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \
-	SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); }
-	SYS_ACCT = 51 // { int acct(char *path); }
-	SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \
-	SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \
-	SYS_REBOOT = 55 // { int reboot(int opt); }
-	SYS_REVOKE = 56 // { int revoke(char *path); }
-	SYS_SYMLINK = 57 // { int symlink(char *path, char *link); }
-	SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \
-	SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \
-	SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \
-	SYS_CHROOT = 61 // { int chroot(char *path); }
-	SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \
-	SYS_VFORK = 66 // { int vfork(void); }
-	SYS_SBRK = 69 // { int sbrk(int incr); }
-	SYS_SSTK = 70 // { int sstk(int incr); }
-	SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \
-	SYS_MUNMAP = 73 // { int munmap(void
*addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \ - SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \ - SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ - SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \ - SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \ - SYS_GETPGRP = 81 // { int getpgrp(void); } - SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } - SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \ - SYS_SWAPON = 85 // { int swapon(char *name); } - SYS_GETITIMER = 86 // { int getitimer(u_int which, \ - SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } - SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } - SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } - SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \ - SYS_FSYNC = 95 // { int fsync(int fd); } - SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \ - SYS_SOCKET = 97 // { int socket(int domain, int type, \ - SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \ - SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } - SYS_BIND = 104 // { int bind(int s, caddr_t name, \ - SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ - SYS_LISTEN = 106 // { int listen(int s, int backlog); } - SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ - SYS_GETRUSAGE = 117 // { int getrusage(int who, \ - SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ - SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \ - SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ - SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ - SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } - SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } - SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } - SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } - SYS_RENAME = 128 // { int rename(char *from, char *to); } - SYS_FLOCK = 131 // { int flock(int fd, int how); } - SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } - SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \ - SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } - SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \ - SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } - SYS_RMDIR = 137 // { int rmdir(char *path); } - SYS_UTIMES = 138 // { int utimes(char *path, \ - SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ - SYS_SETSID = 147 // { int setsid(void); } - SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ - SYS_LGETFH = 160 // { int lgetfh(char *fname, \ - SYS_GETFH = 161 // { int getfh(char *fname, \ - SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } - SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ - SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, \ - SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, \ - SYS_SETFIB = 175 // { int setfib(int fibnum); } - SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } - SYS_SETGID = 181 // { int setgid(gid_t gid); } - SYS_SETEGID = 182 // { int setegid(gid_t egid); } - SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } - SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } - 
SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } - SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ - SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \ - SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, \ - SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, \ - SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, \ - SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, \ - SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \ - SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } - SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } - SYS_UNDELETE = 205 // { int undelete(char *path); } - SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } - SYS_GETPGID = 207 // { int getpgid(pid_t pid); } - SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ - SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ - SYS_CLOCK_SETTIME = 233 // { int clock_settime( \ - SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ - SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \ - SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } - SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \ - SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \ - SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } - SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ - SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \ - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \ - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\ - SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } - SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \ - SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \ - SYS_ISSETUGID = 253 // { int issetugid(void); } - SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \ - SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } - SYS_LUTIMES = 276 // { int lutimes(char *path, \ - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } - SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \ - SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \ - SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \ - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \ - SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 301 // { int modstat(int modid, \ - SYS_MODFNEXT = 302 // { int modfnext(int modid); } - SYS_MODFIND = 303 // { int modfind(const char *name); } - SYS_KLDLOAD = 304 // { int kldload(const char *file); } - SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } - SYS_KLDFIND = 306 // { int kldfind(const char *file); } - SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \ - SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } - SYS_GETSID = 310 // { int getsid(pid_t pid); } - SYS_SETRESUID = 311 // { int 
setresuid(uid_t ruid, uid_t euid, \ - SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \ - SYS_YIELD = 321 // { int yield(void); } - SYS_MLOCKALL = 324 // { int mlockall(int how); } - SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } - SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \ - SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \ - SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \ - SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } - SYS_SCHED_YIELD = 331 // { int sched_yield (void); } - SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } - SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } - SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \ - SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } - SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \ - SYS_JAIL = 338 // { int jail(struct jail *jail); } - SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \ - SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } - SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } - SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \ - SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \ - SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ - SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ - SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ - SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ - SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ - SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ - SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ - SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \ - SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \ - SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ - SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ - SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ - SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, \ - SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ - SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ - SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ - SYS___SETUGID = 374 // { int __setugid(int flag); } - SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } - SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ - SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } - SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } - SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ - SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ - SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ - SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \ - SYS_KENV = 390 // { int kenv(int what, const char *name, \ - SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ - SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ - SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ - SYS_MAC_SYSCALL = 394 // { int mac_syscall(const 
char *policy, \ - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \ - SYS_STATFS = 396 // { int statfs(char *path, \ - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ - SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ - SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ - SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \ - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ - SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ - SYS_SIGACTION = 416 // { int sigaction(int sig, \ - SYS_SIGRETURN = 417 // { int sigreturn( \ - SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( \ - SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ - SYS_SWAPOFF = 424 // { int swapoff(const char *name); } - SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ - SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ - SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ - SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ - SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \ - SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \ - SYS_THR_EXIT = 431 // { void thr_exit(long *state); } - SYS_THR_SELF = 432 // { int thr_self(long *id); } - SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } - SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } - SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } - SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } - SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ - SYS_THR_SUSPEND = 442 // { int thr_suspend( \ - SYS_THR_WAKE = 443 // { int thr_wake(long id); } - SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } - SYS_AUDIT = 445 // { int audit(const void *record, \ - SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ - SYS_GETAUID = 447 // { int getauid(uid_t *auid); } - SYS_SETAUID = 448 // { int setauid(uid_t *auid); } - SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } - SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \ - SYS_AUDITCTL = 453 // { int auditctl(char *path); } - SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ - SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ - SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } - SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } - SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } - SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ - SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ - SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ - SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ - SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \ - SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } - SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); 
} - SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } - SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ - SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } - SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } - SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ - SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ - SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \ - SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \ - SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \ - SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \ - SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ - SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ - SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ - SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \ - SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } - SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ - SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \ - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ - SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ - SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ - SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } - SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } - SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ - SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \ - SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } - SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } - SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } - SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, \ - SYS_CAP_ENTER = 516 // { int cap_enter(void); } - SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } - SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } - SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } - SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } - SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ - SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ - SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } - SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ - SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ - SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ - SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \ - SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \ - SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \ - SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \ - SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \ - SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, \ - SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, \ - SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, \ - SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, \ - SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, \ - SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \ - SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, 
\ - SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \ - SYS_ACCEPT4 = 541 // { int accept4(int s, \ - SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } - SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \ - SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \ - SYS_FUTIMENS = 546 // { int futimens(int fd, \ - SYS_UTIMENSAT = 547 // { int utimensat(int fd, \ + SYS_EXIT = 1 // { void sys_exit(int rval); } exit \ + SYS_FORK = 2 // { int fork(void); } + SYS_READ = 3 // { ssize_t read(int fd, void *buf, \ + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \ + SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } + SYS_CLOSE = 6 // { int close(int fd); } + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \ + SYS_LINK = 9 // { int link(char *path, char *link); } + SYS_UNLINK = 10 // { int unlink(char *path); } + SYS_CHDIR = 12 // { int chdir(char *path); } + SYS_FCHDIR = 13 // { int fchdir(int fd); } + SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } + SYS_CHMOD = 15 // { int chmod(char *path, int mode); } + SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } + SYS_OBREAK = 17 // { int obreak(char *nsize); } break \ + SYS_GETPID = 20 // { pid_t getpid(void); } + SYS_MOUNT = 21 // { int mount(char *type, char *path, \ + SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } + SYS_SETUID = 23 // { int setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t getuid(void); } + SYS_GETEUID = 25 // { uid_t geteuid(void); } + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \ + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \ + SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \ + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \ + SYS_ACCEPT = 30 // { int accept(int s, \ + SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \ + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \ + SYS_ACCESS = 33 // { int access(char *path, int amode); } + SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } + SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } + SYS_SYNC = 36 // { int sync(void); } + SYS_KILL = 37 // { int kill(int pid, int signum); } + SYS_GETPPID = 39 // { pid_t getppid(void); } + SYS_DUP = 41 // { int dup(u_int fd); } + SYS_PIPE = 42 // { int pipe(void); } + SYS_GETEGID = 43 // { gid_t getegid(void); } + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \ + SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \ + SYS_GETGID = 47 // { gid_t getgid(void); } + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \ + SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } + SYS_ACCT = 51 // { int acct(char *path); } + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \ + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \ + SYS_REBOOT = 55 // { int reboot(int opt); } + SYS_REVOKE = 56 // { int revoke(char *path); } + SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } + SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \ + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \ + SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \ + SYS_CHROOT = 61 // { int chroot(char *path); } + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \ + SYS_VFORK = 66 // { int vfork(void); } + SYS_SBRK = 69 // { int sbrk(int incr); } + SYS_SSTK = 70 // { int sstk(int incr); } + SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \ + SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } + 
SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \ + SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \ + SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ + SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \ + SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \ + SYS_GETPGRP = 81 // { int getpgrp(void); } + SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } + SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \ + SYS_SWAPON = 85 // { int swapon(char *name); } + SYS_GETITIMER = 86 // { int getitimer(u_int which, \ + SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } + SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } + SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } + SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \ + SYS_FSYNC = 95 // { int fsync(int fd); } + SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \ + SYS_SOCKET = 97 // { int socket(int domain, int type, \ + SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \ + SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } + SYS_BIND = 104 // { int bind(int s, caddr_t name, \ + SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ + SYS_LISTEN = 106 // { int listen(int s, int backlog); } + SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ + SYS_GETRUSAGE = 117 // { int getrusage(int who, \ + SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ + SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \ + SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ + SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ + SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } + SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } + SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } + SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } + SYS_RENAME = 128 // { int rename(char *from, char *to); } + SYS_FLOCK = 131 // { int flock(int fd, int how); } + SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } + SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \ + SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \ + SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } + SYS_RMDIR = 137 // { int rmdir(char *path); } + SYS_UTIMES = 138 // { int utimes(char *path, \ + SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ + SYS_SETSID = 147 // { int setsid(void); } + SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ + SYS_NLM_SYSCALL = 154 // { int nlm_syscall(int debug_level, int grace_period, int addr_count, char **addrs); } + SYS_NFSSVC = 155 // { int nfssvc(int flag, caddr_t argp); } + SYS_LGETFH = 160 // { int lgetfh(char *fname, \ + SYS_GETFH = 161 // { int getfh(char *fname, \ + SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } + SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ + SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, \ + SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, \ + SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, \ + SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, \ + SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, \ + SYS_SETFIB = 175 // { int setfib(int fibnum); } + SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } + SYS_SETGID = 181 // { int setgid(gid_t gid); } + SYS_SETEGID = 
182 // { int setegid(gid_t egid); } + SYS_SETEUID = 183 // { int seteuid(uid_t euid); } + SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } + SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } + SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } + SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } + SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ + SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \ + SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, \ + SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, \ + SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, \ + SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, \ + SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \ + SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } + SYS_UNDELETE = 205 // { int undelete(char *path); } + SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } + SYS_GETPGID = 207 // { int getpgid(pid_t pid); } + SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ + SYS_SEMGET = 221 // { int semget(key_t key, int nsems, \ + SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, \ + SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, \ + SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, \ + SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, \ + SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } + SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, \ + SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ + SYS_CLOCK_SETTIME = 233 // { int clock_settime( \ + SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ + SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \ + SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } + SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \ + SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \ + SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } + SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ + SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \ + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \ + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\ + SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } + SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \ + SYS_RFORK = 251 // { int rfork(int flags); } + SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \ + SYS_ISSETUGID = 253 // { int issetugid(void); } + SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } + SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } + SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, \ + SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \ + SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } + SYS_LUTIMES = 276 // { int lutimes(char *path, \ + SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } + SYS_NFSTAT = 279 // { 
int nfstat(int fd, struct nstat *sb); } + SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } + SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \ + SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \ + SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \ + SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \ + SYS_MODNEXT = 300 // { int modnext(int modid); } + SYS_MODSTAT = 301 // { int modstat(int modid, \ + SYS_MODFNEXT = 302 // { int modfnext(int modid); } + SYS_MODFIND = 303 // { int modfind(const char *name); } + SYS_KLDLOAD = 304 // { int kldload(const char *file); } + SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } + SYS_KLDFIND = 306 // { int kldfind(const char *file); } + SYS_KLDNEXT = 307 // { int kldnext(int fileid); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \ + SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } + SYS_GETSID = 310 // { int getsid(pid_t pid); } + SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \ + SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \ + SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend( \ + SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, \ + SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } + SYS_OAIO_READ = 318 // { int oaio_read(struct oaiocb *aiocbp); } + SYS_OAIO_WRITE = 319 // { int oaio_write(struct oaiocb *aiocbp); } + SYS_OLIO_LISTIO = 320 // { int olio_listio(int mode, \ + SYS_YIELD = 321 // { int yield(void); } + SYS_MLOCKALL = 324 // { int mlockall(int how); } + SYS_MUNLOCKALL = 325 // { int munlockall(void); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \ + SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \ + SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \ + SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } + SYS_SCHED_YIELD = 331 // { int sched_yield (void); } + SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } + SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } + SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \ + SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } + SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \ + SYS_JAIL = 338 // { int jail(struct jail *jail); } + SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \ + SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } + SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } + SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \ + SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \ + SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ + SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ + SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ + SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ + SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ + SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ + SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ + SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \ + SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ + 
SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \ + SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ + SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete( \ + SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ + SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ + SYS_KQUEUE = 362 // { int kqueue(void); } + SYS_KEVENT = 363 // { int kevent(int fd, \ + SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ + SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ + SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ + SYS___SETUGID = 374 // { int __setugid(int flag); } + SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } + SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ + SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } + SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } + SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ + SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ + SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ + SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \ + SYS_KENV = 390 // { int kenv(int what, const char *name, \ + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ + SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ + SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ + SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \ + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \ + SYS_STATFS = 396 // { int statfs(char *path, \ + SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ + SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } + SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } + SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } + SYS_KSEM_TRYWAIT = 403 // { int ksem_trywait(semid_t id); } + SYS_KSEM_INIT = 404 // { int ksem_init(semid_t *idp, \ + SYS_KSEM_OPEN = 405 // { int ksem_open(semid_t *idp, \ + SYS_KSEM_UNLINK = 406 // { int ksem_unlink(const char *name); } + SYS_KSEM_GETVALUE = 407 // { int ksem_getvalue(semid_t id, int *val); } + SYS_KSEM_DESTROY = 408 // { int ksem_destroy(semid_t id); } + SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ + SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ + SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \ + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ + SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ + SYS_SIGACTION = 416 // { int sigaction(int sig, \ + SYS_SIGRETURN = 417 // { int sigreturn( \ + SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext( \ + SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ + SYS_SWAPOFF = 424 // { int swapoff(const char *name); } + SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ + SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ + SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ + SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ + SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \ + SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long 
*id, \ + SYS_THR_EXIT = 431 // { void thr_exit(long *state); } + SYS_THR_SELF = 432 // { int thr_self(long *id); } + SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } + SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } + SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } + SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } + SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ + SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, \ + SYS_THR_SUSPEND = 442 // { int thr_suspend( \ + SYS_THR_WAKE = 443 // { int thr_wake(long id); } + SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } + SYS_AUDIT = 445 // { int audit(const void *record, \ + SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ + SYS_GETAUID = 447 // { int getauid(uid_t *auid); } + SYS_SETAUID = 448 // { int setauid(uid_t *auid); } + SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } + SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \ + SYS_AUDITCTL = 453 // { int auditctl(char *path); } + SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ + SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ + SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } + SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, \ + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, \ + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, \ + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, \ + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, \ + SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } + SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } + SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } + SYS_AIO_FSYNC = 465 // { int aio_fsync(int op, struct aiocb *aiocbp); } + SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ + SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } + SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \ + SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \ + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \ + SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ + SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ + SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ + SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \ + SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } + SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } + SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } + SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ + SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } + SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } + SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ + SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ + SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \ + SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \ + SYS_FACCESSAT = 489 // { int 
faccessat(int fd, char *path, int amode, \ + SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \ + SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ + SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ + SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ + SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ + SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \ + SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } + SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } + SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ + SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \ + SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ + SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ + SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ + SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } + SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } + SYS_GSSD_SYSCALL = 505 // { int gssd_syscall(char *path); } + SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ + SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \ + SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } + SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } + SYS___SEMCTL = 510 // { int __semctl(int semid, int semnum, \ + SYS_MSGCTL = 511 // { int msgctl(int msqid, int cmd, \ + SYS_SHMCTL = 512 // { int shmctl(int shmid, int cmd, \ + SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } + SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, \ + SYS_CAP_ENTER = 516 // { int cap_enter(void); } + SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } + SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } + SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } + SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } + SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ + SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ + SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } + SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ + SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ + SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ + SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \ + SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \ + SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \ + SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \ + SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \ + SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, \ + SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, \ + SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, \ + SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, \ + SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, \ + SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \ + SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \ + SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \ + SYS_ACCEPT4 = 541 // { int accept4(int s, \ + SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } + SYS_AIO_MLOCK = 543 // { int aio_mlock(struct aiocb *aiocbp); } + SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \ + SYS_PPOLL = 545 // { int ppoll(struct pollfd 
*fds, u_int nfds, \ + SYS_FUTIMENS = 546 // { int futimens(int fd, \ + SYS_UTIMENSAT = 547 // { int utimensat(int fd, \ ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index 44883141..d61941ba 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -7,347 +7,397 @@ package unix const ( // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int - SYS_EXIT = 1 // { void sys_exit(int rval); } exit \ - SYS_FORK = 2 // { int fork(void); } - SYS_READ = 3 // { ssize_t read(int fd, void *buf, \ - SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \ - SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } - SYS_CLOSE = 6 // { int close(int fd); } - SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \ - SYS_LINK = 9 // { int link(char *path, char *link); } - SYS_UNLINK = 10 // { int unlink(char *path); } - SYS_CHDIR = 12 // { int chdir(char *path); } - SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } - SYS_CHMOD = 15 // { int chmod(char *path, int mode); } - SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break \ - SYS_GETPID = 20 // { pid_t getpid(void); } - SYS_MOUNT = 21 // { int mount(char *type, char *path, \ - SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } - SYS_SETUID = 23 // { int setuid(uid_t uid); } - SYS_GETUID = 24 // { uid_t getuid(void); } - SYS_GETEUID = 25 // { uid_t geteuid(void); } - SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \ - SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \ - SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \ - SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \ - SYS_ACCEPT = 30 // { int accept(int s, \ - SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \ - SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \ - SYS_ACCESS = 33 // { int access(char *path, int amode); } - SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } - SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } - SYS_SYNC = 36 // { int sync(void); } - SYS_KILL = 37 // { int kill(int pid, int signum); } - SYS_GETPPID = 39 // { pid_t getppid(void); } - SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } - SYS_GETEGID = 43 // { gid_t getegid(void); } - SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \ - SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \ - SYS_GETGID = 47 // { gid_t getgid(void); } - SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \ - SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } - SYS_ACCT = 51 // { int acct(char *path); } - SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \ - SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \ - SYS_REBOOT = 55 // { int reboot(int opt); } - SYS_REVOKE = 56 // { int revoke(char *path); } - SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } - SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \ - SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \ - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \ - SYS_CHROOT = 61 // { int chroot(char *path); } - SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \ - SYS_VFORK = 66 // { int vfork(void); } - SYS_SBRK = 69 // { int sbrk(int incr); } - SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int 
anom); } vadvise \ - SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \ - SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \ - SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ - SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \ - SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \ - SYS_GETPGRP = 81 // { int getpgrp(void); } - SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } - SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \ - SYS_SWAPON = 85 // { int swapon(char *name); } - SYS_GETITIMER = 86 // { int getitimer(u_int which, \ - SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } - SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } - SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } - SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \ - SYS_FSYNC = 95 // { int fsync(int fd); } - SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \ - SYS_SOCKET = 97 // { int socket(int domain, int type, \ - SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \ - SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } - SYS_BIND = 104 // { int bind(int s, caddr_t name, \ - SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ - SYS_LISTEN = 106 // { int listen(int s, int backlog); } - SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ - SYS_GETRUSAGE = 117 // { int getrusage(int who, \ - SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ - SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \ - SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ - SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ - SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } - SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } - SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } - SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } - SYS_RENAME = 128 // { int rename(char *from, char *to); } - SYS_FLOCK = 131 // { int flock(int fd, int how); } - SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } - SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \ - SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } - SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \ - SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } - SYS_RMDIR = 137 // { int rmdir(char *path); } - SYS_UTIMES = 138 // { int utimes(char *path, \ - SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ - SYS_SETSID = 147 // { int setsid(void); } - SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ - SYS_LGETFH = 160 // { int lgetfh(char *fname, \ - SYS_GETFH = 161 // { int getfh(char *fname, \ - SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } - SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ - SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, \ - SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, \ - SYS_SETFIB = 175 // { int setfib(int fibnum); } - SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } - SYS_SETGID = 181 // { int setgid(gid_t gid); } - SYS_SETEGID = 182 // { int setegid(gid_t egid); } - SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } - 
SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } - SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } - SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ - SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \ - SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, \ - SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, \ - SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, \ - SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, \ - SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \ - SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } - SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } - SYS_UNDELETE = 205 // { int undelete(char *path); } - SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } - SYS_GETPGID = 207 // { int getpgid(pid_t pid); } - SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ - SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ - SYS_CLOCK_SETTIME = 233 // { int clock_settime( \ - SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ - SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \ - SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } - SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \ - SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \ - SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } - SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ - SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \ - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \ - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\ - SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } - SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \ - SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \ - SYS_ISSETUGID = 253 // { int issetugid(void); } - SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \ - SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } - SYS_LUTIMES = 276 // { int lutimes(char *path, \ - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } - SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \ - SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \ - SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \ - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \ - SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 301 // { int modstat(int modid, \ - SYS_MODFNEXT = 302 // { int modfnext(int modid); } - SYS_MODFIND = 303 // { int modfind(const char *name); } - SYS_KLDLOAD = 304 // { int kldload(const char *file); } - SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } - SYS_KLDFIND = 306 // { int kldfind(const char *file); } - SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \ - SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } - SYS_GETSID = 310 // { int 
getsid(pid_t pid); } - SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \ - SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \ - SYS_YIELD = 321 // { int yield(void); } - SYS_MLOCKALL = 324 // { int mlockall(int how); } - SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } - SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \ - SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \ - SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \ - SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } - SYS_SCHED_YIELD = 331 // { int sched_yield (void); } - SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } - SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } - SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \ - SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } - SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \ - SYS_JAIL = 338 // { int jail(struct jail *jail); } - SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \ - SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } - SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } - SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \ - SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \ - SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ - SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ - SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ - SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ - SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ - SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ - SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ - SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \ - SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \ - SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ - SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ - SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ - SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, \ - SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ - SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ - SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ - SYS___SETUGID = 374 // { int __setugid(int flag); } - SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } - SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ - SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } - SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } - SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ - SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ - SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ - SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \ - SYS_KENV = 390 // { int kenv(int what, const char *name, \ - SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ - SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ - SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ 
- SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \ - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \ - SYS_STATFS = 396 // { int statfs(char *path, \ - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ - SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ - SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ - SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \ - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ - SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ - SYS_SIGACTION = 416 // { int sigaction(int sig, \ - SYS_SIGRETURN = 417 // { int sigreturn( \ - SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( \ - SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ - SYS_SWAPOFF = 424 // { int swapoff(const char *name); } - SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ - SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ - SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ - SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ - SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \ - SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \ - SYS_THR_EXIT = 431 // { void thr_exit(long *state); } - SYS_THR_SELF = 432 // { int thr_self(long *id); } - SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } - SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } - SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } - SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } - SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ - SYS_THR_SUSPEND = 442 // { int thr_suspend( \ - SYS_THR_WAKE = 443 // { int thr_wake(long id); } - SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } - SYS_AUDIT = 445 // { int audit(const void *record, \ - SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ - SYS_GETAUID = 447 // { int getauid(uid_t *auid); } - SYS_SETAUID = 448 // { int setauid(uid_t *auid); } - SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } - SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \ - SYS_AUDITCTL = 453 // { int auditctl(char *path); } - SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ - SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ - SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } - SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } - SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } - SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ - SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ - SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ - SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ - SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \ - SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } - 
SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } - SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } - SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ - SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } - SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } - SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ - SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ - SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \ - SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \ - SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \ - SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \ - SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ - SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ - SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ - SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \ - SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } - SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ - SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \ - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ - SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ - SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ - SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } - SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } - SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ - SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \ - SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } - SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } - SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } - SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, \ - SYS_CAP_ENTER = 516 // { int cap_enter(void); } - SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } - SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } - SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } - SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } - SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ - SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ - SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } - SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ - SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ - SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ - SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \ - SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \ - SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \ - SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \ - SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \ - SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, \ - SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, \ - SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, \ - SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, \ - SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, \ - SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \ - 
SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \ - SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \ - SYS_ACCEPT4 = 541 // { int accept4(int s, \ - SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } - SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \ - SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \ - SYS_FUTIMENS = 546 // { int futimens(int fd, \ - SYS_UTIMENSAT = 547 // { int utimensat(int fd, \ + SYS_EXIT = 1 // { void sys_exit(int rval); } exit \ + SYS_FORK = 2 // { int fork(void); } + SYS_READ = 3 // { ssize_t read(int fd, void *buf, \ + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \ + SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } + SYS_CLOSE = 6 // { int close(int fd); } + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \ + SYS_LINK = 9 // { int link(char *path, char *link); } + SYS_UNLINK = 10 // { int unlink(char *path); } + SYS_CHDIR = 12 // { int chdir(char *path); } + SYS_FCHDIR = 13 // { int fchdir(int fd); } + SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } + SYS_CHMOD = 15 // { int chmod(char *path, int mode); } + SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } + SYS_OBREAK = 17 // { int obreak(char *nsize); } break \ + SYS_GETPID = 20 // { pid_t getpid(void); } + SYS_MOUNT = 21 // { int mount(char *type, char *path, \ + SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } + SYS_SETUID = 23 // { int setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t getuid(void); } + SYS_GETEUID = 25 // { uid_t geteuid(void); } + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \ + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \ + SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \ + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \ + SYS_ACCEPT = 30 // { int accept(int s, \ + SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \ + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \ + SYS_ACCESS = 33 // { int access(char *path, int amode); } + SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } + SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } + SYS_SYNC = 36 // { int sync(void); } + SYS_KILL = 37 // { int kill(int pid, int signum); } + SYS_GETPPID = 39 // { pid_t getppid(void); } + SYS_DUP = 41 // { int dup(u_int fd); } + SYS_PIPE = 42 // { int pipe(void); } + SYS_GETEGID = 43 // { gid_t getegid(void); } + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \ + SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \ + SYS_GETGID = 47 // { gid_t getgid(void); } + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \ + SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } + SYS_ACCT = 51 // { int acct(char *path); } + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \ + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \ + SYS_REBOOT = 55 // { int reboot(int opt); } + SYS_REVOKE = 56 // { int revoke(char *path); } + SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } + SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \ + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \ + SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \ + SYS_CHROOT = 61 // { int chroot(char *path); } + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \ + SYS_VFORK = 66 // { int vfork(void); } + SYS_SBRK = 69 // { int sbrk(int incr); } + SYS_SSTK = 70 // { int sstk(int incr); } + SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise 
\ + SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \ + SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \ + SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ + SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \ + SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \ + SYS_GETPGRP = 81 // { int getpgrp(void); } + SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } + SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \ + SYS_SWAPON = 85 // { int swapon(char *name); } + SYS_GETITIMER = 86 // { int getitimer(u_int which, \ + SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } + SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } + SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } + SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \ + SYS_FSYNC = 95 // { int fsync(int fd); } + SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \ + SYS_SOCKET = 97 // { int socket(int domain, int type, \ + SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \ + SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } + SYS_BIND = 104 // { int bind(int s, caddr_t name, \ + SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ + SYS_LISTEN = 106 // { int listen(int s, int backlog); } + SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ + SYS_GETRUSAGE = 117 // { int getrusage(int who, \ + SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ + SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \ + SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ + SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ + SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } + SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } + SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } + SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } + SYS_RENAME = 128 // { int rename(char *from, char *to); } + SYS_FLOCK = 131 // { int flock(int fd, int how); } + SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } + SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \ + SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \ + SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } + SYS_RMDIR = 137 // { int rmdir(char *path); } + SYS_UTIMES = 138 // { int utimes(char *path, \ + SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ + SYS_SETSID = 147 // { int setsid(void); } + SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ + SYS_NLM_SYSCALL = 154 // { int nlm_syscall(int debug_level, int grace_period, int addr_count, char **addrs); } + SYS_NFSSVC = 155 // { int nfssvc(int flag, caddr_t argp); } + SYS_LGETFH = 160 // { int lgetfh(char *fname, \ + SYS_GETFH = 161 // { int getfh(char *fname, \ + SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } + SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ + SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, \ + SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, \ + SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, \ + SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, \ + SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, \ + SYS_SETFIB = 175 // { int setfib(int fibnum); } + SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } 
+ SYS_SETGID = 181 // { int setgid(gid_t gid); } + SYS_SETEGID = 182 // { int setegid(gid_t egid); } + SYS_SETEUID = 183 // { int seteuid(uid_t euid); } + SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } + SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } + SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } + SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } + SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ + SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \ + SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, \ + SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, \ + SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, \ + SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, \ + SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \ + SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } + SYS_UNDELETE = 205 // { int undelete(char *path); } + SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } + SYS_GETPGID = 207 // { int getpgid(pid_t pid); } + SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ + SYS_SEMGET = 221 // { int semget(key_t key, int nsems, \ + SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, \ + SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, \ + SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, \ + SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, \ + SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } + SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, \ + SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ + SYS_CLOCK_SETTIME = 233 // { int clock_settime( \ + SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ + SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \ + SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } + SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \ + SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \ + SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } + SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ + SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \ + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \ + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\ + SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } + SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \ + SYS_RFORK = 251 // { int rfork(int flags); } + SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \ + SYS_ISSETUGID = 253 // { int issetugid(void); } + SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } + SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } + SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, \ + SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \ + SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } + SYS_LUTIMES = 276 // { int lutimes(char *path, \ + SYS_NSTAT = 278 // { int 
nstat(char *path, struct nstat *ub); } + SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } + SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } + SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \ + SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \ + SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \ + SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \ + SYS_MODNEXT = 300 // { int modnext(int modid); } + SYS_MODSTAT = 301 // { int modstat(int modid, \ + SYS_MODFNEXT = 302 // { int modfnext(int modid); } + SYS_MODFIND = 303 // { int modfind(const char *name); } + SYS_KLDLOAD = 304 // { int kldload(const char *file); } + SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } + SYS_KLDFIND = 306 // { int kldfind(const char *file); } + SYS_KLDNEXT = 307 // { int kldnext(int fileid); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \ + SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } + SYS_GETSID = 310 // { int getsid(pid_t pid); } + SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \ + SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \ + SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend( \ + SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, \ + SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } + SYS_OAIO_READ = 318 // { int oaio_read(struct oaiocb *aiocbp); } + SYS_OAIO_WRITE = 319 // { int oaio_write(struct oaiocb *aiocbp); } + SYS_OLIO_LISTIO = 320 // { int olio_listio(int mode, \ + SYS_YIELD = 321 // { int yield(void); } + SYS_MLOCKALL = 324 // { int mlockall(int how); } + SYS_MUNLOCKALL = 325 // { int munlockall(void); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \ + SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \ + SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \ + SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } + SYS_SCHED_YIELD = 331 // { int sched_yield (void); } + SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } + SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } + SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \ + SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } + SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \ + SYS_JAIL = 338 // { int jail(struct jail *jail); } + SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \ + SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } + SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } + SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \ + SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \ + SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ + SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ + SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ + SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ + SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ + SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ + SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ + SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \ + SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ + 
SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \ + SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ + SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete( \ + SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ + SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ + SYS_KQUEUE = 362 // { int kqueue(void); } + SYS_KEVENT = 363 // { int kevent(int fd, \ + SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ + SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ + SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ + SYS___SETUGID = 374 // { int __setugid(int flag); } + SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } + SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ + SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } + SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } + SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ + SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ + SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ + SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \ + SYS_KENV = 390 // { int kenv(int what, const char *name, \ + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ + SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ + SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ + SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \ + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \ + SYS_STATFS = 396 // { int statfs(char *path, \ + SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ + SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } + SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } + SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } + SYS_KSEM_TRYWAIT = 403 // { int ksem_trywait(semid_t id); } + SYS_KSEM_INIT = 404 // { int ksem_init(semid_t *idp, \ + SYS_KSEM_OPEN = 405 // { int ksem_open(semid_t *idp, \ + SYS_KSEM_UNLINK = 406 // { int ksem_unlink(const char *name); } + SYS_KSEM_GETVALUE = 407 // { int ksem_getvalue(semid_t id, int *val); } + SYS_KSEM_DESTROY = 408 // { int ksem_destroy(semid_t id); } + SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ + SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ + SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \ + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ + SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ + SYS_SIGACTION = 416 // { int sigaction(int sig, \ + SYS_SIGRETURN = 417 // { int sigreturn( \ + SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext( \ + SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ + SYS_SWAPOFF = 424 // { int swapoff(const char *name); } + SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ + SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ + SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ + SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ + SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \ + 
SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \ + SYS_THR_EXIT = 431 // { void thr_exit(long *state); } + SYS_THR_SELF = 432 // { int thr_self(long *id); } + SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } + SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } + SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } + SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } + SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ + SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, \ + SYS_THR_SUSPEND = 442 // { int thr_suspend( \ + SYS_THR_WAKE = 443 // { int thr_wake(long id); } + SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } + SYS_AUDIT = 445 // { int audit(const void *record, \ + SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ + SYS_GETAUID = 447 // { int getauid(uid_t *auid); } + SYS_SETAUID = 448 // { int setauid(uid_t *auid); } + SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } + SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \ + SYS_AUDITCTL = 453 // { int auditctl(char *path); } + SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ + SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ + SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } + SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, \ + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, \ + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, \ + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, \ + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, \ + SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } + SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } + SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } + SYS_AIO_FSYNC = 465 // { int aio_fsync(int op, struct aiocb *aiocbp); } + SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ + SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } + SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \ + SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \ + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \ + SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ + SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ + SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ + SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \ + SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } + SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } + SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } + SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ + SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } + SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } + SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ + SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ + SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \ + SYS_CPUSET_SETAFFINITY = 488 // { int 
cpuset_setaffinity(cpulevel_t level, \ + SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \ + SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \ + SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ + SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ + SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ + SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ + SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \ + SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } + SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } + SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ + SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \ + SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ + SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ + SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ + SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } + SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } + SYS_GSSD_SYSCALL = 505 // { int gssd_syscall(char *path); } + SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ + SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \ + SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } + SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } + SYS___SEMCTL = 510 // { int __semctl(int semid, int semnum, \ + SYS_MSGCTL = 511 // { int msgctl(int msqid, int cmd, \ + SYS_SHMCTL = 512 // { int shmctl(int shmid, int cmd, \ + SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } + SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, \ + SYS_CAP_ENTER = 516 // { int cap_enter(void); } + SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } + SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } + SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } + SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } + SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ + SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ + SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } + SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ + SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ + SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ + SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \ + SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \ + SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \ + SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \ + SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \ + SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, \ + SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, \ + SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, \ + SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, \ + SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, \ + SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \ + SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \ + SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \ + SYS_ACCEPT4 = 541 // { int accept4(int s, \ + SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } + SYS_AIO_MLOCK = 543 // { int aio_mlock(struct aiocb *aiocbp); } + SYS_PROCCTL = 544 // { int procctl(idtype_t 
idtype, id_t id, \ + SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \ + SYS_FUTIMENS = 546 // { int futimens(int fd, \ + SYS_UTIMENSAT = 547 // { int utimensat(int fd, \ ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 95ab1290..8f33ece7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -387,4 +387,6 @@ const ( SYS_PKEY_FREE = 382 SYS_STATX = 383 SYS_ARCH_PRCTL = 384 + SYS_IO_PGETEVENTS = 385 + SYS_RSEQ = 386 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index c5dabf2e..70c1a2c1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -339,4 +339,6 @@ const ( SYS_PKEY_ALLOC = 330 SYS_PKEY_FREE = 331 SYS_STATX = 332 + SYS_IO_PGETEVENTS = 333 + SYS_RSEQ = 334 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index ab7fa5fd..a1db143f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -359,4 +359,5 @@ const ( SYS_PKEY_ALLOC = 395 SYS_PKEY_FREE = 396 SYS_STATX = 397 + SYS_RSEQ = 398 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index b1c6b4bd..2e4cee70 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -283,4 +283,5 @@ const ( SYS_PKEY_ALLOC = 289 SYS_PKEY_FREE = 290 SYS_STATX = 291 + SYS_IO_PGETEVENTS = 292 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 2e9aa7a3..16714491 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -372,4 +372,6 @@ const ( SYS_PKEY_ALLOC = 4364 SYS_PKEY_FREE = 4365 SYS_STATX = 4366 + SYS_RSEQ = 4367 + SYS_IO_PGETEVENTS = 4368 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 92827635..1270a1c9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -332,4 +332,6 @@ const ( SYS_PKEY_ALLOC = 5324 SYS_PKEY_FREE = 5325 SYS_STATX = 5326 + SYS_RSEQ = 5327 + SYS_IO_PGETEVENTS = 5328 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 45bd3fd6..93980be1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -332,4 +332,6 @@ const ( SYS_PKEY_ALLOC = 5324 SYS_PKEY_FREE = 5325 SYS_STATX = 5326 + SYS_RSEQ = 5327 + SYS_IO_PGETEVENTS = 5328 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 62ccac4b..0fc77261 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -372,4 +372,6 @@ const ( SYS_PKEY_ALLOC = 4364 SYS_PKEY_FREE = 4365 SYS_STATX = 4366 + SYS_RSEQ = 4367 + SYS_IO_PGETEVENTS = 4368 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 384d49bf..a5c5f3de 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -370,4 +370,6 @@ const ( SYS_PKEY_ALLOC = 384 SYS_PKEY_FREE = 385 SYS_PKEY_MPROTECT = 386 + SYS_RSEQ = 387 + SYS_IO_PGETEVENTS = 388 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 9623248a..5470eadb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -370,4 +370,6 @@ const ( SYS_PKEY_ALLOC = 384 SYS_PKEY_FREE = 385 SYS_PKEY_MPROTECT = 386 + SYS_RSEQ = 387 + SYS_IO_PGETEVENTS = 388 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go new file mode 100644 index 00000000..41e4fd1d --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -0,0 +1,286 @@ +// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build riscv64,linux + +package unix + +const ( + SYS_IO_SETUP = 0 + SYS_IO_DESTROY = 1 + SYS_IO_SUBMIT = 2 + SYS_IO_CANCEL = 3 + SYS_IO_GETEVENTS = 4 + SYS_SETXATTR = 5 + SYS_LSETXATTR = 6 + SYS_FSETXATTR = 7 + SYS_GETXATTR = 8 + SYS_LGETXATTR = 9 + SYS_FGETXATTR = 10 + SYS_LISTXATTR = 11 + SYS_LLISTXATTR = 12 + SYS_FLISTXATTR = 13 + SYS_REMOVEXATTR = 14 + SYS_LREMOVEXATTR = 15 + SYS_FREMOVEXATTR = 16 + SYS_GETCWD = 17 + SYS_LOOKUP_DCOOKIE = 18 + SYS_EVENTFD2 = 19 + SYS_EPOLL_CREATE1 = 20 + SYS_EPOLL_CTL = 21 + SYS_EPOLL_PWAIT = 22 + SYS_DUP = 23 + SYS_DUP3 = 24 + SYS_FCNTL = 25 + SYS_INOTIFY_INIT1 = 26 + SYS_INOTIFY_ADD_WATCH = 27 + SYS_INOTIFY_RM_WATCH = 28 + SYS_IOCTL = 29 + SYS_IOPRIO_SET = 30 + SYS_IOPRIO_GET = 31 + SYS_FLOCK = 32 + SYS_MKNODAT = 33 + SYS_MKDIRAT = 34 + SYS_UNLINKAT = 35 + SYS_SYMLINKAT = 36 + SYS_LINKAT = 37 + SYS_UMOUNT2 = 39 + SYS_MOUNT = 40 + SYS_PIVOT_ROOT = 41 + SYS_NFSSERVCTL = 42 + SYS_STATFS = 43 + SYS_FSTATFS = 44 + SYS_TRUNCATE = 45 + SYS_FTRUNCATE = 46 + SYS_FALLOCATE = 47 + SYS_FACCESSAT = 48 + SYS_CHDIR = 49 + SYS_FCHDIR = 50 + SYS_CHROOT = 51 + SYS_FCHMOD = 52 + SYS_FCHMODAT = 53 + SYS_FCHOWNAT = 54 + SYS_FCHOWN = 55 + SYS_OPENAT = 56 + SYS_CLOSE = 57 + SYS_VHANGUP = 58 + SYS_PIPE2 = 59 + SYS_QUOTACTL = 60 + SYS_GETDENTS64 = 61 + SYS_LSEEK = 62 + SYS_READ = 63 + SYS_WRITE = 64 + SYS_READV = 65 + SYS_WRITEV = 66 + SYS_PREAD64 = 67 + SYS_PWRITE64 = 68 + SYS_PREADV = 69 + SYS_PWRITEV = 70 + SYS_SENDFILE = 71 + SYS_PSELECT6 = 72 + SYS_PPOLL = 73 + SYS_SIGNALFD4 = 74 + SYS_VMSPLICE = 75 + SYS_SPLICE = 76 + SYS_TEE = 77 + SYS_READLINKAT = 78 + SYS_FSTATAT = 79 + SYS_FSTAT = 80 + SYS_SYNC = 81 + SYS_FSYNC = 82 + SYS_FDATASYNC = 83 + SYS_SYNC_FILE_RANGE = 84 + SYS_TIMERFD_CREATE = 85 + SYS_TIMERFD_SETTIME = 86 + SYS_TIMERFD_GETTIME = 87 + SYS_UTIMENSAT = 88 + SYS_ACCT = 89 + SYS_CAPGET = 90 + SYS_CAPSET = 91 + SYS_PERSONALITY = 92 + SYS_EXIT = 93 + SYS_EXIT_GROUP = 94 + SYS_WAITID = 95 + SYS_SET_TID_ADDRESS = 96 + SYS_UNSHARE = 97 + SYS_FUTEX = 98 + SYS_SET_ROBUST_LIST = 99 + SYS_GET_ROBUST_LIST = 100 + SYS_NANOSLEEP = 101 + SYS_GETITIMER = 102 + SYS_SETITIMER = 103 + SYS_KEXEC_LOAD = 104 + SYS_INIT_MODULE = 105 + SYS_DELETE_MODULE = 106 + SYS_TIMER_CREATE = 107 + SYS_TIMER_GETTIME = 108 + SYS_TIMER_GETOVERRUN = 109 + SYS_TIMER_SETTIME = 110 + SYS_TIMER_DELETE = 111 + SYS_CLOCK_SETTIME = 112 + SYS_CLOCK_GETTIME = 113 + SYS_CLOCK_GETRES = 114 + SYS_CLOCK_NANOSLEEP = 115 + SYS_SYSLOG = 116 + SYS_PTRACE = 117 + SYS_SCHED_SETPARAM = 118 + 
SYS_SCHED_SETSCHEDULER = 119 + SYS_SCHED_GETSCHEDULER = 120 + SYS_SCHED_GETPARAM = 121 + SYS_SCHED_SETAFFINITY = 122 + SYS_SCHED_GETAFFINITY = 123 + SYS_SCHED_YIELD = 124 + SYS_SCHED_GET_PRIORITY_MAX = 125 + SYS_SCHED_GET_PRIORITY_MIN = 126 + SYS_SCHED_RR_GET_INTERVAL = 127 + SYS_RESTART_SYSCALL = 128 + SYS_KILL = 129 + SYS_TKILL = 130 + SYS_TGKILL = 131 + SYS_SIGALTSTACK = 132 + SYS_RT_SIGSUSPEND = 133 + SYS_RT_SIGACTION = 134 + SYS_RT_SIGPROCMASK = 135 + SYS_RT_SIGPENDING = 136 + SYS_RT_SIGTIMEDWAIT = 137 + SYS_RT_SIGQUEUEINFO = 138 + SYS_RT_SIGRETURN = 139 + SYS_SETPRIORITY = 140 + SYS_GETPRIORITY = 141 + SYS_REBOOT = 142 + SYS_SETREGID = 143 + SYS_SETGID = 144 + SYS_SETREUID = 145 + SYS_SETUID = 146 + SYS_SETRESUID = 147 + SYS_GETRESUID = 148 + SYS_SETRESGID = 149 + SYS_GETRESGID = 150 + SYS_SETFSUID = 151 + SYS_SETFSGID = 152 + SYS_TIMES = 153 + SYS_SETPGID = 154 + SYS_GETPGID = 155 + SYS_GETSID = 156 + SYS_SETSID = 157 + SYS_GETGROUPS = 158 + SYS_SETGROUPS = 159 + SYS_UNAME = 160 + SYS_SETHOSTNAME = 161 + SYS_SETDOMAINNAME = 162 + SYS_GETRLIMIT = 163 + SYS_SETRLIMIT = 164 + SYS_GETRUSAGE = 165 + SYS_UMASK = 166 + SYS_PRCTL = 167 + SYS_GETCPU = 168 + SYS_GETTIMEOFDAY = 169 + SYS_SETTIMEOFDAY = 170 + SYS_ADJTIMEX = 171 + SYS_GETPID = 172 + SYS_GETPPID = 173 + SYS_GETUID = 174 + SYS_GETEUID = 175 + SYS_GETGID = 176 + SYS_GETEGID = 177 + SYS_GETTID = 178 + SYS_SYSINFO = 179 + SYS_MQ_OPEN = 180 + SYS_MQ_UNLINK = 181 + SYS_MQ_TIMEDSEND = 182 + SYS_MQ_TIMEDRECEIVE = 183 + SYS_MQ_NOTIFY = 184 + SYS_MQ_GETSETATTR = 185 + SYS_MSGGET = 186 + SYS_MSGCTL = 187 + SYS_MSGRCV = 188 + SYS_MSGSND = 189 + SYS_SEMGET = 190 + SYS_SEMCTL = 191 + SYS_SEMTIMEDOP = 192 + SYS_SEMOP = 193 + SYS_SHMGET = 194 + SYS_SHMCTL = 195 + SYS_SHMAT = 196 + SYS_SHMDT = 197 + SYS_SOCKET = 198 + SYS_SOCKETPAIR = 199 + SYS_BIND = 200 + SYS_LISTEN = 201 + SYS_ACCEPT = 202 + SYS_CONNECT = 203 + SYS_GETSOCKNAME = 204 + SYS_GETPEERNAME = 205 + SYS_SENDTO = 206 + SYS_RECVFROM = 207 + SYS_SETSOCKOPT = 208 + SYS_GETSOCKOPT = 209 + SYS_SHUTDOWN = 210 + SYS_SENDMSG = 211 + SYS_RECVMSG = 212 + SYS_READAHEAD = 213 + SYS_BRK = 214 + SYS_MUNMAP = 215 + SYS_MREMAP = 216 + SYS_ADD_KEY = 217 + SYS_REQUEST_KEY = 218 + SYS_KEYCTL = 219 + SYS_CLONE = 220 + SYS_EXECVE = 221 + SYS_MMAP = 222 + SYS_FADVISE64 = 223 + SYS_SWAPON = 224 + SYS_SWAPOFF = 225 + SYS_MPROTECT = 226 + SYS_MSYNC = 227 + SYS_MLOCK = 228 + SYS_MUNLOCK = 229 + SYS_MLOCKALL = 230 + SYS_MUNLOCKALL = 231 + SYS_MINCORE = 232 + SYS_MADVISE = 233 + SYS_REMAP_FILE_PAGES = 234 + SYS_MBIND = 235 + SYS_GET_MEMPOLICY = 236 + SYS_SET_MEMPOLICY = 237 + SYS_MIGRATE_PAGES = 238 + SYS_MOVE_PAGES = 239 + SYS_RT_TGSIGQUEUEINFO = 240 + SYS_PERF_EVENT_OPEN = 241 + SYS_ACCEPT4 = 242 + SYS_RECVMMSG = 243 + SYS_ARCH_SPECIFIC_SYSCALL = 244 + SYS_WAIT4 = 260 + SYS_PRLIMIT64 = 261 + SYS_FANOTIFY_INIT = 262 + SYS_FANOTIFY_MARK = 263 + SYS_NAME_TO_HANDLE_AT = 264 + SYS_OPEN_BY_HANDLE_AT = 265 + SYS_CLOCK_ADJTIME = 266 + SYS_SYNCFS = 267 + SYS_SETNS = 268 + SYS_SENDMMSG = 269 + SYS_PROCESS_VM_READV = 270 + SYS_PROCESS_VM_WRITEV = 271 + SYS_KCMP = 272 + SYS_FINIT_MODULE = 273 + SYS_SCHED_SETATTR = 274 + SYS_SCHED_GETATTR = 275 + SYS_RENAMEAT2 = 276 + SYS_SECCOMP = 277 + SYS_GETRANDOM = 278 + SYS_MEMFD_CREATE = 279 + SYS_BPF = 280 + SYS_EXECVEAT = 281 + SYS_USERFAULTFD = 282 + SYS_MEMBARRIER = 283 + SYS_MLOCK2 = 284 + SYS_COPY_FILE_RANGE = 285 + SYS_PREADV2 = 286 + SYS_PWRITEV2 = 287 + SYS_PKEY_MPROTECT = 288 + SYS_PKEY_ALLOC = 289 + SYS_PKEY_FREE = 290 + SYS_STATX = 291 + SYS_IO_PGETEVENTS = 292 +) diff 
--git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index ed92409d..de0245a9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -331,4 +331,7 @@ const ( SYS_S390_GUARDED_STORAGE = 378 SYS_STATX = 379 SYS_S390_STHYI = 380 + SYS_KEXEC_FILE_LOAD = 381 + SYS_IO_PGETEVENTS = 382 + SYS_RSEQ = 383 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index c9c129dc..2d099367 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -1,5 +1,5 @@ // mksysnum_linux.pl -Ilinux/usr/include -m64 -D__arch64__ linux/usr/include/asm/unistd.h -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. // +build sparc64,linux diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go index 8afda9c4..f0daa05a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go @@ -1,5 +1,5 @@ // mksysnum_netbsd.pl -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. // +build 386,netbsd diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go index aea8dbec..ddb25b94 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go @@ -1,5 +1,5 @@ // mksysnum_netbsd.pl -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. // +build amd64,netbsd diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go index c6158a7e..315bd63f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go @@ -1,5 +1,5 @@ // mksysnum_netbsd.pl -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. // +build arm,netbsd diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go index 3e8ce2a1..07787301 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -1,5 +1,5 @@ // mksysnum_openbsd.pl -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. 
// +build 386,openbsd diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go index 10edff07..bc7fa579 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go @@ -116,6 +116,7 @@ const ( SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, \ SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); } SYS_SENDSYSLOG = 112 // { int sys_sendsyslog(const char *buf, size_t nbyte, \ + SYS_UNVEIL = 114 // { int sys_unveil(const char *path, \ SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, \ SYS_THRKILL = 119 // { int sys_thrkill(pid_t tid, int signum, void *tcb); } SYS_READV = 120 // { ssize_t sys_readv(int fd, \ diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go index 32653e53..7a1693ac 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go @@ -1,5 +1,5 @@ // mksysnum_openbsd.pl -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. // +build arm,openbsd diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go new file mode 100644 index 00000000..f1cfe7db --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go @@ -0,0 +1,345 @@ +// cgo -godefs types_aix.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build ppc,aix + +package unix + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + PathMax = 0x3ff +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type off64 int64 +type off int32 +type Mode_t uint32 + +type Timespec struct { + Sec int32 + Nsec int32 +} + +type StTimespec struct { + Sec int32 + Nsec int32 +} + +type Timeval struct { + Sec int32 + Usec int32 +} + +type Timeval32 struct { + Sec int32 + Usec int32 +} + +type Timex struct{} + +type Time_t int32 + +type Tms struct{} + +type Utimbuf struct { + Actime int32 + Modtime int32 +} + +type Timezone struct { + Minuteswest int32 + Dsttime int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type Pid_t int32 + +type _Gid_t uint32 + +type dev_t uint32 + +type Stat_t struct { + Dev uint32 + Ino uint32 + Mode uint32 + Nlink int16 + Flag uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Size int32 + Atim StTimespec + Mtim StTimespec + Ctim StTimespec + Blksize int32 + Blocks int32 + Vfstype int32 + Vfs uint32 + Type uint32 + Gen uint32 + Reserved [9]uint32 +} + +type StatxTimestamp struct{} + +type Statx_t struct{} + +type Dirent struct { + Offset uint32 + Ino uint32 + Reclen uint16 + Namlen uint16 + Name [256]uint8 +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [1023]uint8 +} + +type RawSockaddr struct { + Len uint8 + 
Family uint8 + Data [14]uint8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [1012]uint8 +} + +type _Socklen uint32 + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x404 + SizeofSockaddrUnix = 0x401 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofICMPv6Filter = 0x20 +) + +const ( + SizeofIfMsghdr = 0x10 +) + +type IfMsgHdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Addrlen uint8 + _ [1]byte +} + +type FdSet struct { + Bits [2048]int32 +} + +type Utsname struct { + Sysname [32]byte + Nodename [32]byte + Release [32]byte + Version [32]byte + Machine [32]byte +} + +type Ustat_t struct{} + +type Sigset_t struct { + Losigs uint32 + Hisigs uint32 +} + +const ( + AT_FDCWD = -0x2 + AT_REMOVEDIR = 0x1 + AT_SYMLINK_NOFOLLOW = 0x1 +) + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [16]uint8 +} + +type Termio struct { + Iflag uint16 + Oflag uint16 + Cflag uint16 + Lflag uint16 + Line uint8 + Cc [8]uint8 + _ [1]byte +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type PollFd struct { + Fd int32 + Events uint16 + Revents uint16 +} + +const ( + POLLERR = 0x4000 + POLLHUP = 0x2000 + POLLIN = 0x1 + POLLNVAL = 0x8000 + POLLOUT = 0x2 + POLLPRI = 0x4 + POLLRDBAND = 0x20 + POLLRDNORM = 0x10 + POLLWRBAND = 0x40 + POLLWRNORM = 0x2 +) + +type Flock_t struct { + Type int16 + Whence int16 + Sysid uint32 + Pid int32 + Vfs int32 + Start int64 + Len int64 +} + +type Fsid_t struct { + Val [2]uint32 +} +type Fsid64_t struct { + Val [2]uint64 +} + +type Statfs_t struct { + Version int32 + Type int32 + Bsize uint32 + Blocks uint32 + Bfree uint32 + Bavail uint32 + Files uint32 + Ffree uint32 + Fsid Fsid_t + Vfstype int32 + Fsize uint32 + Vfsnumber int32 + Vfsoff int32 + Vfslen int32 + Vfsvers int32 + Fname [32]uint8 + Fpack [32]uint8 + Name_max int32 +} + +const RNDGETENTCNT = 0x80045200 diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go new file mode 100644 index 00000000..95581a3b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go @@ -0,0 +1,354 @@ +// cgo -godefs types_aix.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build ppc64,aix + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + PathMax = 0x3ff +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type off64 int64 +type off int64 +type Mode_t uint32 + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type StTimespec struct { + Sec int64 + Nsec int32 + _ [4]byte +} + +type Timeval struct { + Sec int64 + Usec int32 + _ [4]byte +} + +type Timeval32 struct { + Sec int32 + Usec int32 +} + +type Timex struct{} + +type Time_t int64 + +type Tms struct{} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Timezone struct { + Minuteswest int32 + Dsttime int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type Pid_t int32 + +type _Gid_t uint32 + +type dev_t uint64 + +type Stat_t struct { + Dev uint64 + Ino uint64 + Mode uint32 + Nlink int16 + Flag uint16 + Uid uint32 + Gid uint32 + Rdev uint64 + Ssize int32 + _ [4]byte + Atim StTimespec + Mtim StTimespec + Ctim StTimespec + Blksize int64 + Blocks int64 + Vfstype int32 + Vfs uint32 + Type uint32 + Gen uint32 + Reserved [9]uint32 + Padto_ll uint32 + Size int64 +} + +type StatxTimestamp struct{} + +type Statx_t struct{} + +type Dirent struct { + Offset uint64 + Ino uint64 + Reclen uint16 + Namlen uint16 + Name [256]uint8 + _ [4]byte +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [1023]uint8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]uint8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [1012]uint8 +} + +type _Socklen uint32 + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + _ [4]byte + Iov *Iovec + Iovlen int32 + _ [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x404 + SizeofSockaddrUnix = 0x401 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofICMPv6Filter = 0x20 +) + +const ( + SizeofIfMsghdr = 0x10 +) + +type IfMsgHdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Addrlen uint8 + _ [1]byte +} + +type FdSet struct { + Bits [1024]int64 +} + +type Utsname struct { + Sysname [32]byte + Nodename [32]byte + Release [32]byte + Version [32]byte + Machine [32]byte +} + +type Ustat_t struct{} + +type Sigset_t struct { + Set [4]uint64 +} + +const ( + AT_FDCWD = -0x2 + 
AT_REMOVEDIR = 0x1 + AT_SYMLINK_NOFOLLOW = 0x1 +) + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [16]uint8 +} + +type Termio struct { + Iflag uint16 + Oflag uint16 + Cflag uint16 + Lflag uint16 + Line uint8 + Cc [8]uint8 + _ [1]byte +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type PollFd struct { + Fd int32 + Events uint16 + Revents uint16 +} + +const ( + POLLERR = 0x4000 + POLLHUP = 0x2000 + POLLIN = 0x1 + POLLNVAL = 0x8000 + POLLOUT = 0x2 + POLLPRI = 0x4 + POLLRDBAND = 0x20 + POLLRDNORM = 0x10 + POLLWRBAND = 0x40 + POLLWRNORM = 0x2 +) + +type Flock_t struct { + Type int16 + Whence int16 + Sysid uint32 + Pid int32 + Vfs int32 + Start int64 + Len int64 +} + +type Fsid_t struct { + Val [2]uint32 +} +type Fsid64_t struct { + Val [2]uint64 +} + +type Statfs_t struct { + Version int32 + Type int32 + Bsize uint64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid64_t + Vfstype int32 + _ [4]byte + Fsize uint64 + Vfsnumber int32 + Vfsoff int32 + Vfslen int32 + Vfsvers int32 + Fname [32]uint8 + Fpack [32]uint8 + Name_max int32 + _ [4]byte +} + +const RNDGETENTCNT = 0x80045200 diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index 315a553b..c01ae670 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -56,23 +56,6 @@ type Rlimit struct { type _Gid_t uint32 -const ( - S_IFMT = 0xf000 - S_IFIFO = 0x1000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFBLK = 0x6000 - S_IFREG = 0x8000 - S_IFLNK = 0xa000 - S_IFSOCK = 0xc000 - S_ISUID = 0x800 - S_ISGID = 0x400 - S_ISVTX = 0x200 - S_IRUSR = 0x100 - S_IWUSR = 0x80 - S_IXUSR = 0x40 -) - type Stat_t struct { Ino uint64 Nlink uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 878a21ad..8006c563 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -56,23 +56,6 @@ type Rlimit struct { type _Gid_t uint32 -const ( - S_IFMT = 0xf000 - S_IFIFO = 0x1000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFBLK = 0x6000 - S_IFREG = 0x8000 - S_IFLNK = 0xa000 - S_IFSOCK = 0xc000 - S_ISUID = 0x800 - S_ISGID = 0x400 - S_ISVTX = 0x200 - S_IRUSR = 0x100 - S_IWUSR = 0x80 - S_IXUSR = 0x40 -) - type Stat_t struct { Dev uint32 Ino uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 8408af12..716774de 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -56,23 +56,6 @@ type Rlimit struct { type _Gid_t uint32 -const ( - S_IFMT = 0xf000 - S_IFIFO = 0x1000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFBLK = 0x6000 - S_IFREG = 0x8000 - S_IFLNK = 0xa000 - S_IFSOCK = 0xc000 - S_ISUID = 0x800 - S_ISGID = 0x400 - S_ISVTX = 0x200 - S_IRUSR = 0x100 - S_IWUSR = 0x80 - S_IXUSR = 0x40 -) - type Stat_t struct { Dev uint32 Ino uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 4b2d9a48..92e07b00 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -58,23 +58,6 @@ type Rlimit struct { type _Gid_t uint32 -const ( - S_IFMT = 0xf000 - S_IFIFO = 0x1000 - S_IFCHR = 0x2000 - S_IFDIR = 
0x4000 - S_IFBLK = 0x6000 - S_IFREG = 0x8000 - S_IFLNK = 0xa000 - S_IFSOCK = 0xc000 - S_ISUID = 0x800 - S_ISGID = 0x400 - S_ISVTX = 0x200 - S_IRUSR = 0x100 - S_IWUSR = 0x80 - S_IXUSR = 0x40 -) - type Stat_t struct { Dev uint32 Ino uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 40474620..0f3106fa 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -248,6 +248,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -271,6 +278,14 @@ type RawSockaddrVM struct { Zero [4]uint8 } +type RawSockaddrXDP struct { + Family uint16 + Flags uint16 + Ifindex uint32 + Queue_id uint32 + Shared_umem_fd uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -401,9 +416,11 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 + SizeofSockaddrXDP = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -433,6 +450,7 @@ const ( IFLA_ADDRESS = 0x1 IFLA_BROADCAST = 0x2 IFLA_IFNAME = 0x3 + IFLA_INFO_KIND = 0x1 IFLA_MTU = 0x4 IFLA_LINK = 0x5 IFLA_QDISC = 0x6 @@ -501,6 +519,20 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 @@ -705,6 +737,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1813,3 +1847,121 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int32 +} + +type BlkpgIoctlArg struct { + Op int32 + Flags int32 + Datalen int32 + Data *byte +} + +type BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 +} + +const ( + BLKPG = 0x1269 + BLKPG_ADD_PARTITION = 0x1 + BLKPG_DEL_PARTITION = 0x2 + BLKPG_RESIZE_PARTITION = 0x3 +) + +const ( + NETNSA_NONE = 0x0 + NETNSA_NSID = 0x1 + NETNSA_PID = 0x2 + NETNSA_FD = 0x3 +) + +type XDPRingOffset struct { + Producer uint64 + Consumer uint64 + Desc uint64 +} + +type XDPMmapOffsets struct { + Rx XDPRingOffset + Tx XDPRingOffset + Fr XDPRingOffset + Cr XDPRingOffset +} + +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 +} + +type XDPStatistics struct { + Rx_dropped uint64 + Rx_invalid_descs uint64 + Tx_invalid_descs uint64 +} + +type XDPDesc struct { + Addr uint64 + Len uint32 + Options uint32 +} + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + 
NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 2ab0cb9e..24d6c6fd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -250,6 +250,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -273,6 +280,14 @@ type RawSockaddrVM struct { Zero [4]uint8 } +type RawSockaddrXDP struct { + Family uint16 + Flags uint16 + Ifindex uint32 + Queue_id uint32 + Shared_umem_fd uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -405,9 +420,11 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 + SizeofSockaddrXDP = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -437,6 +454,7 @@ const ( IFLA_ADDRESS = 0x1 IFLA_BROADCAST = 0x2 IFLA_IFNAME = 0x3 + IFLA_INFO_KIND = 0x1 IFLA_MTU = 0x4 IFLA_LINK = 0x5 IFLA_QDISC = 0x6 @@ -505,6 +523,20 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 @@ -723,6 +755,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1833,3 +1867,123 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} + +type BlkpgIoctlArg struct { + Op int32 + Flags int32 + Datalen int32 + _ [4]byte + Data *byte +} + +type BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 + _ [4]byte +} + +const ( + BLKPG = 0x1269 + BLKPG_ADD_PARTITION = 0x1 + BLKPG_DEL_PARTITION = 0x2 + BLKPG_RESIZE_PARTITION = 0x3 +) + +const ( + NETNSA_NONE = 0x0 + NETNSA_NSID = 0x1 + NETNSA_PID = 0x2 + NETNSA_FD = 0x3 +) + +type XDPRingOffset struct { + Producer uint64 + Consumer uint64 + Desc uint64 +} + +type XDPMmapOffsets struct { + Rx XDPRingOffset + Tx XDPRingOffset + Fr XDPRingOffset + Cr XDPRingOffset +} + +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 +} + +type XDPStatistics struct { + Rx_dropped uint64 + Rx_invalid_descs uint64 + Tx_invalid_descs uint64 +} + +type XDPDesc struct { + Addr uint64 + Len uint32 + Options uint32 +} + +const ( + 
NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 18d577b9..77c7a3e1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -251,6 +251,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -274,6 +281,14 @@ type RawSockaddrVM struct { Zero [4]uint8 } +type RawSockaddrXDP struct { + Family uint16 + Flags uint16 + Ifindex uint32 + Queue_id uint32 + Shared_umem_fd uint32 +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -404,9 +419,11 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 + SizeofSockaddrXDP = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -436,6 +453,7 @@ const ( IFLA_ADDRESS = 0x1 IFLA_BROADCAST = 0x2 IFLA_IFNAME = 0x3 + IFLA_INFO_KIND = 0x1 IFLA_MTU = 0x4 IFLA_LINK = 0x5 IFLA_QDISC = 0x6 @@ -504,6 +522,20 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 @@ -693,6 +725,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1802,3 +1836,122 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int32 +} + +type BlkpgIoctlArg struct { + Op int32 + Flags int32 + Datalen int32 + Data *byte +} + +type BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 + _ [4]byte +} + +const ( + BLKPG = 0x1269 + BLKPG_ADD_PARTITION = 0x1 + BLKPG_DEL_PARTITION = 0x2 + BLKPG_RESIZE_PARTITION = 0x3 +) + +const ( + NETNSA_NONE = 0x0 + NETNSA_NSID = 0x1 + NETNSA_PID = 0x2 + NETNSA_FD = 0x3 +) + +type XDPRingOffset struct { + Producer uint64 + Consumer uint64 + Desc uint64 +} + +type XDPMmapOffsets struct { + Rx XDPRingOffset + Tx XDPRingOffset + Fr XDPRingOffset + Cr XDPRingOffset +} + +type XDPUmemReg 
struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 +} + +type XDPStatistics struct { + Rx_dropped uint64 + Rx_invalid_descs uint64 + Tx_invalid_descs uint64 +} + +type XDPDesc struct { + Addr uint64 + Len uint32 + Options uint32 +} + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 6ea80563..044d97ba 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -251,6 +251,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -274,6 +281,14 @@ type RawSockaddrVM struct { Zero [4]uint8 } +type RawSockaddrXDP struct { + Family uint16 + Flags uint16 + Ifindex uint32 + Queue_id uint32 + Shared_umem_fd uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -406,9 +421,11 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 + SizeofSockaddrXDP = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -438,6 +455,7 @@ const ( IFLA_ADDRESS = 0x1 IFLA_BROADCAST = 0x2 IFLA_IFNAME = 0x3 + IFLA_INFO_KIND = 0x1 IFLA_MTU = 0x4 IFLA_LINK = 0x5 IFLA_QDISC = 0x6 @@ -506,6 +524,20 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 @@ -702,6 +734,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1812,3 +1846,123 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} + +type BlkpgIoctlArg struct { + Op int32 + Flags int32 + Datalen int32 + _ [4]byte + Data *byte +} + +type BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 + _ [4]byte +} + +const ( + BLKPG = 0x1269 + BLKPG_ADD_PARTITION = 0x1 + BLKPG_DEL_PARTITION = 0x2 + BLKPG_RESIZE_PARTITION = 0x3 +) + +const ( + NETNSA_NONE 
= 0x0 + NETNSA_NSID = 0x1 + NETNSA_PID = 0x2 + NETNSA_FD = 0x3 +) + +type XDPRingOffset struct { + Producer uint64 + Consumer uint64 + Desc uint64 +} + +type XDPMmapOffsets struct { + Rx XDPRingOffset + Tx XDPRingOffset + Fr XDPRingOffset + Cr XDPRingOffset +} + +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 +} + +type XDPStatistics struct { + Rx_dropped uint64 + Rx_invalid_descs uint64 + Tx_invalid_descs uint64 +} + +type XDPDesc struct { + Addr uint64 + Len uint32 + Options uint32 +} + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index e0a39322..46e6fdcc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -249,6 +249,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -272,6 +279,14 @@ type RawSockaddrVM struct { Zero [4]uint8 } +type RawSockaddrXDP struct { + Family uint16 + Flags uint16 + Ifindex uint32 + Queue_id uint32 + Shared_umem_fd uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -402,9 +417,11 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 + SizeofSockaddrXDP = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -434,6 +451,7 @@ const ( IFLA_ADDRESS = 0x1 IFLA_BROADCAST = 0x2 IFLA_IFNAME = 0x3 + IFLA_INFO_KIND = 0x1 IFLA_MTU = 0x4 IFLA_LINK = 0x5 IFLA_QDISC = 0x6 @@ -502,6 +520,20 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 @@ -697,6 +729,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1807,3 +1841,122 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int32 +} + +type BlkpgIoctlArg struct { + Op int32 + Flags int32 + Datalen int32 + Data *byte +} + +type 
BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 + _ [4]byte +} + +const ( + BLKPG = 0x20001269 + BLKPG_ADD_PARTITION = 0x1 + BLKPG_DEL_PARTITION = 0x2 + BLKPG_RESIZE_PARTITION = 0x3 +) + +const ( + NETNSA_NONE = 0x0 + NETNSA_NSID = 0x1 + NETNSA_PID = 0x2 + NETNSA_FD = 0x3 +) + +type XDPRingOffset struct { + Producer uint64 + Consumer uint64 + Desc uint64 +} + +type XDPMmapOffsets struct { + Rx XDPRingOffset + Tx XDPRingOffset + Fr XDPRingOffset + Cr XDPRingOffset +} + +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 +} + +type XDPStatistics struct { + Rx_dropped uint64 + Rx_invalid_descs uint64 + Tx_invalid_descs uint64 +} + +type XDPDesc struct { + Addr uint64 + Len uint32 + Options uint32 +} + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 838135e2..a66849e3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -251,6 +251,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -274,6 +281,14 @@ type RawSockaddrVM struct { Zero [4]uint8 } +type RawSockaddrXDP struct { + Family uint16 + Flags uint16 + Ifindex uint32 + Queue_id uint32 + Shared_umem_fd uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -406,9 +421,11 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 + SizeofSockaddrXDP = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -438,6 +455,7 @@ const ( IFLA_ADDRESS = 0x1 IFLA_BROADCAST = 0x2 IFLA_IFNAME = 0x3 + IFLA_INFO_KIND = 0x1 IFLA_MTU = 0x4 IFLA_LINK = 0x5 IFLA_QDISC = 0x6 @@ -506,6 +524,20 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 @@ -704,6 +736,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1814,3 +1848,123 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled 
uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} + +type BlkpgIoctlArg struct { + Op int32 + Flags int32 + Datalen int32 + _ [4]byte + Data *byte +} + +type BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 + _ [4]byte +} + +const ( + BLKPG = 0x20001269 + BLKPG_ADD_PARTITION = 0x1 + BLKPG_DEL_PARTITION = 0x2 + BLKPG_RESIZE_PARTITION = 0x3 +) + +const ( + NETNSA_NONE = 0x0 + NETNSA_NSID = 0x1 + NETNSA_PID = 0x2 + NETNSA_FD = 0x3 +) + +type XDPRingOffset struct { + Producer uint64 + Consumer uint64 + Desc uint64 +} + +type XDPMmapOffsets struct { + Rx XDPRingOffset + Tx XDPRingOffset + Fr XDPRingOffset + Cr XDPRingOffset +} + +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 +} + +type XDPStatistics struct { + Rx_dropped uint64 + Rx_invalid_descs uint64 + Tx_invalid_descs uint64 +} + +type XDPDesc struct { + Addr uint64 + Len uint32 + Options uint32 +} + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index a9d0131f..bf3eca7f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -251,6 +251,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -274,6 +281,14 @@ type RawSockaddrVM struct { Zero [4]uint8 } +type RawSockaddrXDP struct { + Family uint16 + Flags uint16 + Ifindex uint32 + Queue_id uint32 + Shared_umem_fd uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -406,9 +421,11 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 + SizeofSockaddrXDP = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -438,6 +455,7 @@ const ( IFLA_ADDRESS = 0x1 IFLA_BROADCAST = 0x2 IFLA_IFNAME = 0x3 + IFLA_INFO_KIND = 0x1 IFLA_MTU = 0x4 IFLA_LINK = 0x5 IFLA_QDISC = 0x6 @@ -506,6 +524,20 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 @@ -704,6 +736,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + 
AT_EACCESS = 0x200 ) type PollFd struct { @@ -1814,3 +1848,123 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} + +type BlkpgIoctlArg struct { + Op int32 + Flags int32 + Datalen int32 + _ [4]byte + Data *byte +} + +type BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 + _ [4]byte +} + +const ( + BLKPG = 0x20001269 + BLKPG_ADD_PARTITION = 0x1 + BLKPG_DEL_PARTITION = 0x2 + BLKPG_RESIZE_PARTITION = 0x3 +) + +const ( + NETNSA_NONE = 0x0 + NETNSA_NSID = 0x1 + NETNSA_PID = 0x2 + NETNSA_FD = 0x3 +) + +type XDPRingOffset struct { + Producer uint64 + Consumer uint64 + Desc uint64 +} + +type XDPMmapOffsets struct { + Rx XDPRingOffset + Tx XDPRingOffset + Fr XDPRingOffset + Cr XDPRingOffset +} + +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 +} + +type XDPStatistics struct { + Rx_dropped uint64 + Rx_invalid_descs uint64 + Tx_invalid_descs uint64 +} + +type XDPDesc struct { + Addr uint64 + Len uint32 + Options uint32 +} + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 4f6f1455..4aefec3f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -249,6 +249,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -272,6 +279,14 @@ type RawSockaddrVM struct { Zero [4]uint8 } +type RawSockaddrXDP struct { + Family uint16 + Flags uint16 + Ifindex uint32 + Queue_id uint32 + Shared_umem_fd uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -402,9 +417,11 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 + SizeofSockaddrXDP = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -434,6 +451,7 @@ const ( IFLA_ADDRESS = 0x1 IFLA_BROADCAST = 0x2 IFLA_IFNAME = 0x3 + IFLA_INFO_KIND = 0x1 IFLA_MTU = 0x4 IFLA_LINK = 0x5 IFLA_QDISC = 0x6 @@ -502,6 +520,20 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + 
RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 @@ -697,6 +729,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1807,3 +1841,122 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int32 +} + +type BlkpgIoctlArg struct { + Op int32 + Flags int32 + Datalen int32 + Data *byte +} + +type BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 + _ [4]byte +} + +const ( + BLKPG = 0x20001269 + BLKPG_ADD_PARTITION = 0x1 + BLKPG_DEL_PARTITION = 0x2 + BLKPG_RESIZE_PARTITION = 0x3 +) + +const ( + NETNSA_NONE = 0x0 + NETNSA_NSID = 0x1 + NETNSA_PID = 0x2 + NETNSA_FD = 0x3 +) + +type XDPRingOffset struct { + Producer uint64 + Consumer uint64 + Desc uint64 +} + +type XDPMmapOffsets struct { + Rx XDPRingOffset + Tx XDPRingOffset + Fr XDPRingOffset + Cr XDPRingOffset +} + +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 +} + +type XDPStatistics struct { + Rx_dropped uint64 + Rx_invalid_descs uint64 + Tx_invalid_descs uint64 +} + +type XDPDesc struct { + Addr uint64 + Len uint32 + Options uint32 +} + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 9ddd4701..81b32262 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -252,6 +252,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -275,6 +282,14 @@ type RawSockaddrVM struct { Zero [4]uint8 } +type RawSockaddrXDP struct { + Family uint16 + Flags uint16 + Ifindex uint32 + Queue_id uint32 + Shared_umem_fd uint32 +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -407,9 +422,11 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 + SizeofSockaddrXDP = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -439,6 +456,7 @@ const ( IFLA_ADDRESS = 0x1 IFLA_BROADCAST = 0x2 IFLA_IFNAME = 0x3 
+ IFLA_INFO_KIND = 0x1 IFLA_MTU = 0x4 IFLA_LINK = 0x5 IFLA_QDISC = 0x6 @@ -507,6 +525,20 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 @@ -712,6 +744,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1822,3 +1856,123 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} + +type BlkpgIoctlArg struct { + Op int32 + Flags int32 + Datalen int32 + _ [4]byte + Data *byte +} + +type BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 + _ [4]byte +} + +const ( + BLKPG = 0x20001269 + BLKPG_ADD_PARTITION = 0x1 + BLKPG_DEL_PARTITION = 0x2 + BLKPG_RESIZE_PARTITION = 0x3 +) + +const ( + NETNSA_NONE = 0x0 + NETNSA_NSID = 0x1 + NETNSA_PID = 0x2 + NETNSA_FD = 0x3 +) + +type XDPRingOffset struct { + Producer uint64 + Consumer uint64 + Desc uint64 +} + +type XDPMmapOffsets struct { + Rx XDPRingOffset + Tx XDPRingOffset + Fr XDPRingOffset + Cr XDPRingOffset +} + +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 +} + +type XDPStatistics struct { + Rx_dropped uint64 + Rx_invalid_descs uint64 + Tx_invalid_descs uint64 +} + +type XDPDesc struct { + Addr uint64 + Len uint32 + Options uint32 +} + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 3a5cc696..9ba53b4e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -252,6 +252,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -275,6 +282,14 @@ type RawSockaddrVM struct { Zero [4]uint8 } +type RawSockaddrXDP struct { + Family uint16 + Flags uint16 + Ifindex uint32 + Queue_id uint32 + Shared_umem_fd uint32 +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -407,9 +422,11 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 
SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 + SizeofSockaddrXDP = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -439,6 +456,7 @@ const ( IFLA_ADDRESS = 0x1 IFLA_BROADCAST = 0x2 IFLA_IFNAME = 0x3 + IFLA_INFO_KIND = 0x1 IFLA_MTU = 0x4 IFLA_LINK = 0x5 IFLA_QDISC = 0x6 @@ -507,6 +525,20 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 @@ -712,6 +744,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1822,3 +1856,123 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} + +type BlkpgIoctlArg struct { + Op int32 + Flags int32 + Datalen int32 + _ [4]byte + Data *byte +} + +type BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 + _ [4]byte +} + +const ( + BLKPG = 0x20001269 + BLKPG_ADD_PARTITION = 0x1 + BLKPG_DEL_PARTITION = 0x2 + BLKPG_RESIZE_PARTITION = 0x3 +) + +const ( + NETNSA_NONE = 0x0 + NETNSA_NSID = 0x1 + NETNSA_PID = 0x2 + NETNSA_FD = 0x3 +) + +type XDPRingOffset struct { + Producer uint64 + Consumer uint64 + Desc uint64 +} + +type XDPMmapOffsets struct { + Rx XDPRingOffset + Tx XDPRingOffset + Fr XDPRingOffset + Cr XDPRingOffset +} + +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 +} + +type XDPStatistics struct { + Rx_dropped uint64 + Rx_invalid_descs uint64 + Tx_invalid_descs uint64 +} + +type XDPDesc struct { + Addr uint64 + Len uint32 + Options uint32 +} + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go new file mode 100644 index 00000000..cf39aa01 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -0,0 +1,1995 @@ +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build riscv64,linux + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timex struct { + Modes uint32 + _ [4]byte + Offset int64 + Freq int64 + Maxerror int64 + Esterror int64 + Status int32 + _ [4]byte + Constant int64 + Precision int64 + Tolerance int64 + Time Timeval + Tick int64 + Ppsfreq int64 + Jitter int64 + Shift int32 + _ [4]byte + Stabil int64 + Jitcnt int64 + Calcnt int64 + Errcnt int64 + Stbcnt int64 + Tai int32 + _ [44]byte +} + +type Time_t int64 + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + _ uint64 + Size int64 + Blksize int32 + _ int32 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + _ [2]int32 +} + +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + _ int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]uint8 + _ [5]byte +} + +type Fsid struct { + Val [2]int32 +} + +type Flock_t struct { + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte +} + +type FscryptPolicy struct { + Version uint8 + Contents_encryption_mode uint8 + Filenames_encryption_mode uint8 + Flags uint8 + Master_key_descriptor [8]uint8 +} + +type FscryptKey struct { + Mode uint32 + Raw [64]uint8 + Size uint32 +} + +type KeyctlDHParams struct { + Private int32 + Prime int32 + Base int32 +} + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]uint8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddrHCI struct { + Family uint16 + Dev uint16 + Channel uint16 +} + +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 
+ _ [1]byte +} + +type RawSockaddrCAN struct { + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte +} + +type RawSockaddrALG struct { + Family uint16 + Type [14]uint8 + Feat uint32 + Mask uint32 + Name [64]uint8 +} + +type RawSockaddrVM struct { + Family uint16 + Reserved1 uint16 + Port uint32 + Cid uint32 + Zero [4]uint8 +} + +type RawSockaddrXDP struct { + Family uint16 + Flags uint16 + Ifindex uint32 + Queue_id uint32 + Shared_umem_fd uint32 +} + +type RawSockaddr struct { + Family uint16 + Data [14]uint8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]uint8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type PacketMreq struct { + Ifindex int32 + Type uint16 + Alen uint16 + Address [8]uint8 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + _ [4]byte + Iov *Iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + _ [4]byte +} + +type Cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + _ [2]byte + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa + SizeofSockaddrCAN = 0x10 + SizeofSockaddrALG = 0x58 + SizeofSockaddrVM = 0x10 + SizeofSockaddrXDP = 0x10 + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofPacketMreq = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 +) + +const ( + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_INFO_KIND = 0x1 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 
+ IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_MAX = 0x31 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +const ( + SizeofSockFilter = 0x8 + SizeofSockFprog = 0x10 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + _ [6]byte + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 +} + +const SizeofInotifyEvent = 0x10 + +type PtraceRegs struct { + Pc uint64 + Ra uint64 + Sp 
uint64 + Gp uint64 + Tp uint64 + T0 uint64 + T1 uint64 + T2 uint64 + S0 uint64 + S1 uint64 + A0 uint64 + A1 uint64 + A2 uint64 + A3 uint64 + A4 uint64 + A5 uint64 + A6 uint64 + A7 uint64 + S2 uint64 + S3 uint64 + S4 uint64 + S5 uint64 + S6 uint64 + S7 uint64 + S8 uint64 + S9 uint64 + S10 uint64 + S11 uint64 + T3 uint64 + T4 uint64 + T5 uint64 + T6 uint64 +} + +type FdSet struct { + Bits [16]int64 +} + +type Sysinfo_t struct { + Uptime int64 + Loads [3]uint64 + Totalram uint64 + Freeram uint64 + Sharedram uint64 + Bufferram uint64 + Totalswap uint64 + Freeswap uint64 + Procs uint16 + Pad uint16 + _ [4]byte + Totalhigh uint64 + Freehigh uint64 + Unit uint32 + _ [0]uint8 + _ [4]byte +} + +type Utsname struct { + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte +} + +type Ustat_t struct { + Tfree int32 + _ [4]byte + Tinode uint64 + Fname [6]uint8 + Fpack [6]uint8 + _ [4]byte +} + +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +const ( + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLIN = 0x1 + POLLPRI = 0x2 + POLLOUT = 0x4 + POLLRDHUP = 0x2000 + POLLERR = 0x8 + POLLHUP = 0x10 + POLLNVAL = 0x20 +) + +type Sigset_t struct { + Val [16]uint64 +} + +const RNDGETENTCNT = 0x80045200 + +const PERF_IOC_FLAG_GROUP = 0x1 + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [19]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type Taskstats struct { + Version uint16 + _ [2]byte + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + _ [6]byte + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]uint8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + Ac_ppid uint32 + Ac_btime uint32 + _ [4]byte + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 +} + +const ( + TASKSTATS_CMD_UNSPEC = 0x0 + TASKSTATS_CMD_GET = 0x1 + TASKSTATS_CMD_NEW = 0x2 + TASKSTATS_TYPE_UNSPEC = 0x0 + TASKSTATS_TYPE_PID = 0x1 + TASKSTATS_TYPE_TGID = 0x2 + TASKSTATS_TYPE_STATS = 0x3 + TASKSTATS_TYPE_AGGR_PID = 0x4 + TASKSTATS_TYPE_AGGR_TGID = 0x5 + TASKSTATS_TYPE_NULL = 0x6 + TASKSTATS_CMD_ATTR_UNSPEC = 0x0 + TASKSTATS_CMD_ATTR_PID = 0x1 + TASKSTATS_CMD_ATTR_TGID = 0x2 + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 +) + +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 
+ CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + +type Genlmsghdr struct { + Cmd uint8 + Version uint8 + Reserved uint16 +} + +const ( + CTRL_CMD_UNSPEC = 0x0 + CTRL_CMD_NEWFAMILY = 0x1 + CTRL_CMD_DELFAMILY = 0x2 + CTRL_CMD_GETFAMILY = 0x3 + CTRL_CMD_NEWOPS = 0x4 + CTRL_CMD_DELOPS = 0x5 + CTRL_CMD_GETOPS = 0x6 + CTRL_CMD_NEWMCAST_GRP = 0x7 + CTRL_CMD_DELMCAST_GRP = 0x8 + CTRL_CMD_GETMCAST_GRP = 0x9 + CTRL_ATTR_UNSPEC = 0x0 + CTRL_ATTR_FAMILY_ID = 0x1 + CTRL_ATTR_FAMILY_NAME = 0x2 + CTRL_ATTR_VERSION = 0x3 + CTRL_ATTR_HDRSIZE = 0x4 + CTRL_ATTR_MAXATTR = 0x5 + CTRL_ATTR_OPS = 0x6 + CTRL_ATTR_MCAST_GROUPS = 0x7 + CTRL_ATTR_OP_UNSPEC = 0x0 + CTRL_ATTR_OP_ID = 0x1 + CTRL_ATTR_OP_FLAGS = 0x2 + CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 + CTRL_ATTR_MCAST_GRP_NAME = 0x1 + CTRL_ATTR_MCAST_GRP_ID = 0x2 +) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + 
PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 
0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]uint8 + _ uint64 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + _ [4]byte + Start uint64 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +const ( + ST_MANDLOCK = 0x40 + ST_NOATIME = 0x400 + ST_NODEV = 0x4 + ST_NODIRATIME = 0x800 + ST_NOEXEC = 0x8 + ST_NOSUID = 0x2 + ST_RDONLY = 0x1 + ST_RELATIME = 0x1000 + ST_SYNCHRONOUS = 0x10 +) + +type TpacketHdr struct { + Status uint64 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 + _ [4]byte +} + +type Tpacket2Hdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Nsec uint32 + Vlan_tci uint16 + Vlan_tpid uint16 + _ [4]uint8 +} + +type Tpacket3Hdr struct { + Next_offset uint32 + Sec uint32 + Nsec uint32 + Snaplen uint32 + Len uint32 + Status uint32 + Mac uint16 + Net uint16 
+ Hv1 TpacketHdrVariant1 + _ [8]uint8 +} + +type TpacketHdrVariant1 struct { + Rxhash uint32 + Vlan_tci uint32 + Vlan_tpid uint16 + _ uint16 +} + +type TpacketBlockDesc struct { + Version uint32 + To_priv uint32 + Hdr [40]byte +} + +type TpacketReq struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 +} + +type TpacketReq3 struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 + Retire_blk_tov uint32 + Sizeof_priv uint32 + Feature_req_word uint32 +} + +type TpacketStats struct { + Packets uint32 + Drops uint32 +} + +type TpacketStatsV3 struct { + Packets uint32 + Drops uint32 + Freeze_q_cnt uint32 +} + +type TpacketAuxdata struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Vlan_tci uint16 + Vlan_tpid uint16 +} + +const ( + TPACKET_V1 = 0x0 + TPACKET_V2 = 0x1 + TPACKET_V3 = 0x2 +) + +const ( + SizeofTpacketHdr = 0x20 + SizeofTpacket2Hdr = 0x20 + SizeofTpacket3Hdr = 0x30 +) + +const ( + NF_INET_PRE_ROUTING = 0x0 + NF_INET_LOCAL_IN = 0x1 + NF_INET_FORWARD = 0x2 + NF_INET_LOCAL_OUT = 0x3 + NF_INET_POST_ROUTING = 0x4 + NF_INET_NUMHOOKS = 0x5 +) + +const ( + NF_NETDEV_INGRESS = 0x0 + NF_NETDEV_NUMHOOKS = 0x1 +) + +const ( + NFPROTO_UNSPEC = 0x0 + NFPROTO_INET = 0x1 + NFPROTO_IPV4 = 0x2 + NFPROTO_ARP = 0x3 + NFPROTO_NETDEV = 0x5 + NFPROTO_BRIDGE = 0x7 + NFPROTO_IPV6 = 0xa + NFPROTO_DECNET = 0xc + NFPROTO_NUMPROTO = 0xd +) + +type Nfgenmsg struct { + Nfgen_family uint8 + Version uint8 + Res_id uint16 +} + +const ( + NFNL_BATCH_UNSPEC = 0x0 + NFNL_BATCH_GENID = 0x1 +) + +const ( + NFT_REG_VERDICT = 0x0 + NFT_REG_1 = 0x1 + NFT_REG_2 = 0x2 + NFT_REG_3 = 0x3 + NFT_REG_4 = 0x4 + NFT_REG32_00 = 0x8 + NFT_REG32_01 = 0x9 + NFT_REG32_02 = 0xa + NFT_REG32_03 = 0xb + NFT_REG32_04 = 0xc + NFT_REG32_05 = 0xd + NFT_REG32_06 = 0xe + NFT_REG32_07 = 0xf + NFT_REG32_08 = 0x10 + NFT_REG32_09 = 0x11 + NFT_REG32_10 = 0x12 + NFT_REG32_11 = 0x13 + NFT_REG32_12 = 0x14 + NFT_REG32_13 = 0x15 + NFT_REG32_14 = 0x16 + NFT_REG32_15 = 0x17 + NFT_CONTINUE = -0x1 + NFT_BREAK = -0x2 + NFT_JUMP = -0x3 + NFT_GOTO = -0x4 + NFT_RETURN = -0x5 + NFT_MSG_NEWTABLE = 0x0 + NFT_MSG_GETTABLE = 0x1 + NFT_MSG_DELTABLE = 0x2 + NFT_MSG_NEWCHAIN = 0x3 + NFT_MSG_GETCHAIN = 0x4 + NFT_MSG_DELCHAIN = 0x5 + NFT_MSG_NEWRULE = 0x6 + NFT_MSG_GETRULE = 0x7 + NFT_MSG_DELRULE = 0x8 + NFT_MSG_NEWSET = 0x9 + NFT_MSG_GETSET = 0xa + NFT_MSG_DELSET = 0xb + NFT_MSG_NEWSETELEM = 0xc + NFT_MSG_GETSETELEM = 0xd + NFT_MSG_DELSETELEM = 0xe + NFT_MSG_NEWGEN = 0xf + NFT_MSG_GETGEN = 0x10 + NFT_MSG_TRACE = 0x11 + NFT_MSG_NEWOBJ = 0x12 + NFT_MSG_GETOBJ = 0x13 + NFT_MSG_DELOBJ = 0x14 + NFT_MSG_GETOBJ_RESET = 0x15 + NFT_MSG_MAX = 0x19 + NFTA_LIST_UNPEC = 0x0 + NFTA_LIST_ELEM = 0x1 + NFTA_HOOK_UNSPEC = 0x0 + NFTA_HOOK_HOOKNUM = 0x1 + NFTA_HOOK_PRIORITY = 0x2 + NFTA_HOOK_DEV = 0x3 + NFT_TABLE_F_DORMANT = 0x1 + NFTA_TABLE_UNSPEC = 0x0 + NFTA_TABLE_NAME = 0x1 + NFTA_TABLE_FLAGS = 0x2 + NFTA_TABLE_USE = 0x3 + NFTA_CHAIN_UNSPEC = 0x0 + NFTA_CHAIN_TABLE = 0x1 + NFTA_CHAIN_HANDLE = 0x2 + NFTA_CHAIN_NAME = 0x3 + NFTA_CHAIN_HOOK = 0x4 + NFTA_CHAIN_POLICY = 0x5 + NFTA_CHAIN_USE = 0x6 + NFTA_CHAIN_TYPE = 0x7 + NFTA_CHAIN_COUNTERS = 0x8 + NFTA_CHAIN_PAD = 0x9 + NFTA_RULE_UNSPEC = 0x0 + NFTA_RULE_TABLE = 0x1 + NFTA_RULE_CHAIN = 0x2 + NFTA_RULE_HANDLE = 0x3 + NFTA_RULE_EXPRESSIONS = 0x4 + NFTA_RULE_COMPAT = 0x5 + NFTA_RULE_POSITION = 0x6 + NFTA_RULE_USERDATA = 0x7 + NFTA_RULE_PAD = 0x8 + NFTA_RULE_ID = 0x9 + NFT_RULE_COMPAT_F_INV = 0x2 + NFT_RULE_COMPAT_F_MASK = 0x2 + NFTA_RULE_COMPAT_UNSPEC = 
0x0 + NFTA_RULE_COMPAT_PROTO = 0x1 + NFTA_RULE_COMPAT_FLAGS = 0x2 + NFT_SET_ANONYMOUS = 0x1 + NFT_SET_CONSTANT = 0x2 + NFT_SET_INTERVAL = 0x4 + NFT_SET_MAP = 0x8 + NFT_SET_TIMEOUT = 0x10 + NFT_SET_EVAL = 0x20 + NFT_SET_OBJECT = 0x40 + NFT_SET_POL_PERFORMANCE = 0x0 + NFT_SET_POL_MEMORY = 0x1 + NFTA_SET_DESC_UNSPEC = 0x0 + NFTA_SET_DESC_SIZE = 0x1 + NFTA_SET_UNSPEC = 0x0 + NFTA_SET_TABLE = 0x1 + NFTA_SET_NAME = 0x2 + NFTA_SET_FLAGS = 0x3 + NFTA_SET_KEY_TYPE = 0x4 + NFTA_SET_KEY_LEN = 0x5 + NFTA_SET_DATA_TYPE = 0x6 + NFTA_SET_DATA_LEN = 0x7 + NFTA_SET_POLICY = 0x8 + NFTA_SET_DESC = 0x9 + NFTA_SET_ID = 0xa + NFTA_SET_TIMEOUT = 0xb + NFTA_SET_GC_INTERVAL = 0xc + NFTA_SET_USERDATA = 0xd + NFTA_SET_PAD = 0xe + NFTA_SET_OBJ_TYPE = 0xf + NFT_SET_ELEM_INTERVAL_END = 0x1 + NFTA_SET_ELEM_UNSPEC = 0x0 + NFTA_SET_ELEM_KEY = 0x1 + NFTA_SET_ELEM_DATA = 0x2 + NFTA_SET_ELEM_FLAGS = 0x3 + NFTA_SET_ELEM_TIMEOUT = 0x4 + NFTA_SET_ELEM_EXPIRATION = 0x5 + NFTA_SET_ELEM_USERDATA = 0x6 + NFTA_SET_ELEM_EXPR = 0x7 + NFTA_SET_ELEM_PAD = 0x8 + NFTA_SET_ELEM_OBJREF = 0x9 + NFTA_SET_ELEM_LIST_UNSPEC = 0x0 + NFTA_SET_ELEM_LIST_TABLE = 0x1 + NFTA_SET_ELEM_LIST_SET = 0x2 + NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 + NFTA_SET_ELEM_LIST_SET_ID = 0x4 + NFT_DATA_VALUE = 0x0 + NFT_DATA_VERDICT = 0xffffff00 + NFTA_DATA_UNSPEC = 0x0 + NFTA_DATA_VALUE = 0x1 + NFTA_DATA_VERDICT = 0x2 + NFTA_VERDICT_UNSPEC = 0x0 + NFTA_VERDICT_CODE = 0x1 + NFTA_VERDICT_CHAIN = 0x2 + NFTA_EXPR_UNSPEC = 0x0 + NFTA_EXPR_NAME = 0x1 + NFTA_EXPR_DATA = 0x2 + NFTA_IMMEDIATE_UNSPEC = 0x0 + NFTA_IMMEDIATE_DREG = 0x1 + NFTA_IMMEDIATE_DATA = 0x2 + NFTA_BITWISE_UNSPEC = 0x0 + NFTA_BITWISE_SREG = 0x1 + NFTA_BITWISE_DREG = 0x2 + NFTA_BITWISE_LEN = 0x3 + NFTA_BITWISE_MASK = 0x4 + NFTA_BITWISE_XOR = 0x5 + NFT_BYTEORDER_NTOH = 0x0 + NFT_BYTEORDER_HTON = 0x1 + NFTA_BYTEORDER_UNSPEC = 0x0 + NFTA_BYTEORDER_SREG = 0x1 + NFTA_BYTEORDER_DREG = 0x2 + NFTA_BYTEORDER_OP = 0x3 + NFTA_BYTEORDER_LEN = 0x4 + NFTA_BYTEORDER_SIZE = 0x5 + NFT_CMP_EQ = 0x0 + NFT_CMP_NEQ = 0x1 + NFT_CMP_LT = 0x2 + NFT_CMP_LTE = 0x3 + NFT_CMP_GT = 0x4 + NFT_CMP_GTE = 0x5 + NFTA_CMP_UNSPEC = 0x0 + NFTA_CMP_SREG = 0x1 + NFTA_CMP_OP = 0x2 + NFTA_CMP_DATA = 0x3 + NFT_RANGE_EQ = 0x0 + NFT_RANGE_NEQ = 0x1 + NFTA_RANGE_UNSPEC = 0x0 + NFTA_RANGE_SREG = 0x1 + NFTA_RANGE_OP = 0x2 + NFTA_RANGE_FROM_DATA = 0x3 + NFTA_RANGE_TO_DATA = 0x4 + NFT_LOOKUP_F_INV = 0x1 + NFTA_LOOKUP_UNSPEC = 0x0 + NFTA_LOOKUP_SET = 0x1 + NFTA_LOOKUP_SREG = 0x2 + NFTA_LOOKUP_DREG = 0x3 + NFTA_LOOKUP_SET_ID = 0x4 + NFTA_LOOKUP_FLAGS = 0x5 + NFT_DYNSET_OP_ADD = 0x0 + NFT_DYNSET_OP_UPDATE = 0x1 + NFT_DYNSET_F_INV = 0x1 + NFTA_DYNSET_UNSPEC = 0x0 + NFTA_DYNSET_SET_NAME = 0x1 + NFTA_DYNSET_SET_ID = 0x2 + NFTA_DYNSET_OP = 0x3 + NFTA_DYNSET_SREG_KEY = 0x4 + NFTA_DYNSET_SREG_DATA = 0x5 + NFTA_DYNSET_TIMEOUT = 0x6 + NFTA_DYNSET_EXPR = 0x7 + NFTA_DYNSET_PAD = 0x8 + NFTA_DYNSET_FLAGS = 0x9 + NFT_PAYLOAD_LL_HEADER = 0x0 + NFT_PAYLOAD_NETWORK_HEADER = 0x1 + NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_CSUM_NONE = 0x0 + NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 + NFTA_PAYLOAD_UNSPEC = 0x0 + NFTA_PAYLOAD_DREG = 0x1 + NFTA_PAYLOAD_BASE = 0x2 + NFTA_PAYLOAD_OFFSET = 0x3 + NFTA_PAYLOAD_LEN = 0x4 + NFTA_PAYLOAD_SREG = 0x5 + NFTA_PAYLOAD_CSUM_TYPE = 0x6 + NFTA_PAYLOAD_CSUM_OFFSET = 0x7 + NFTA_PAYLOAD_CSUM_FLAGS = 0x8 + NFT_EXTHDR_F_PRESENT = 0x1 + NFT_EXTHDR_OP_IPV6 = 0x0 + NFT_EXTHDR_OP_TCPOPT = 0x1 + NFTA_EXTHDR_UNSPEC = 0x0 + NFTA_EXTHDR_DREG = 0x1 + NFTA_EXTHDR_TYPE = 0x2 + NFTA_EXTHDR_OFFSET = 0x3 + NFTA_EXTHDR_LEN = 0x4 + 
NFTA_EXTHDR_FLAGS = 0x5 + NFTA_EXTHDR_OP = 0x6 + NFTA_EXTHDR_SREG = 0x7 + NFT_META_LEN = 0x0 + NFT_META_PROTOCOL = 0x1 + NFT_META_PRIORITY = 0x2 + NFT_META_MARK = 0x3 + NFT_META_IIF = 0x4 + NFT_META_OIF = 0x5 + NFT_META_IIFNAME = 0x6 + NFT_META_OIFNAME = 0x7 + NFT_META_IIFTYPE = 0x8 + NFT_META_OIFTYPE = 0x9 + NFT_META_SKUID = 0xa + NFT_META_SKGID = 0xb + NFT_META_NFTRACE = 0xc + NFT_META_RTCLASSID = 0xd + NFT_META_SECMARK = 0xe + NFT_META_NFPROTO = 0xf + NFT_META_L4PROTO = 0x10 + NFT_META_BRI_IIFNAME = 0x11 + NFT_META_BRI_OIFNAME = 0x12 + NFT_META_PKTTYPE = 0x13 + NFT_META_CPU = 0x14 + NFT_META_IIFGROUP = 0x15 + NFT_META_OIFGROUP = 0x16 + NFT_META_CGROUP = 0x17 + NFT_META_PRANDOM = 0x18 + NFT_RT_CLASSID = 0x0 + NFT_RT_NEXTHOP4 = 0x1 + NFT_RT_NEXTHOP6 = 0x2 + NFT_RT_TCPMSS = 0x3 + NFT_HASH_JENKINS = 0x0 + NFT_HASH_SYM = 0x1 + NFTA_HASH_UNSPEC = 0x0 + NFTA_HASH_SREG = 0x1 + NFTA_HASH_DREG = 0x2 + NFTA_HASH_LEN = 0x3 + NFTA_HASH_MODULUS = 0x4 + NFTA_HASH_SEED = 0x5 + NFTA_HASH_OFFSET = 0x6 + NFTA_HASH_TYPE = 0x7 + NFTA_META_UNSPEC = 0x0 + NFTA_META_DREG = 0x1 + NFTA_META_KEY = 0x2 + NFTA_META_SREG = 0x3 + NFTA_RT_UNSPEC = 0x0 + NFTA_RT_DREG = 0x1 + NFTA_RT_KEY = 0x2 + NFT_CT_STATE = 0x0 + NFT_CT_DIRECTION = 0x1 + NFT_CT_STATUS = 0x2 + NFT_CT_MARK = 0x3 + NFT_CT_SECMARK = 0x4 + NFT_CT_EXPIRATION = 0x5 + NFT_CT_HELPER = 0x6 + NFT_CT_L3PROTOCOL = 0x7 + NFT_CT_SRC = 0x8 + NFT_CT_DST = 0x9 + NFT_CT_PROTOCOL = 0xa + NFT_CT_PROTO_SRC = 0xb + NFT_CT_PROTO_DST = 0xc + NFT_CT_LABELS = 0xd + NFT_CT_PKTS = 0xe + NFT_CT_BYTES = 0xf + NFT_CT_AVGPKT = 0x10 + NFT_CT_ZONE = 0x11 + NFT_CT_EVENTMASK = 0x12 + NFTA_CT_UNSPEC = 0x0 + NFTA_CT_DREG = 0x1 + NFTA_CT_KEY = 0x2 + NFTA_CT_DIRECTION = 0x3 + NFTA_CT_SREG = 0x4 + NFT_LIMIT_PKTS = 0x0 + NFT_LIMIT_PKT_BYTES = 0x1 + NFT_LIMIT_F_INV = 0x1 + NFTA_LIMIT_UNSPEC = 0x0 + NFTA_LIMIT_RATE = 0x1 + NFTA_LIMIT_UNIT = 0x2 + NFTA_LIMIT_BURST = 0x3 + NFTA_LIMIT_TYPE = 0x4 + NFTA_LIMIT_FLAGS = 0x5 + NFTA_LIMIT_PAD = 0x6 + NFTA_COUNTER_UNSPEC = 0x0 + NFTA_COUNTER_BYTES = 0x1 + NFTA_COUNTER_PACKETS = 0x2 + NFTA_COUNTER_PAD = 0x3 + NFTA_LOG_UNSPEC = 0x0 + NFTA_LOG_GROUP = 0x1 + NFTA_LOG_PREFIX = 0x2 + NFTA_LOG_SNAPLEN = 0x3 + NFTA_LOG_QTHRESHOLD = 0x4 + NFTA_LOG_LEVEL = 0x5 + NFTA_LOG_FLAGS = 0x6 + NFTA_QUEUE_UNSPEC = 0x0 + NFTA_QUEUE_NUM = 0x1 + NFTA_QUEUE_TOTAL = 0x2 + NFTA_QUEUE_FLAGS = 0x3 + NFTA_QUEUE_SREG_QNUM = 0x4 + NFT_QUOTA_F_INV = 0x1 + NFT_QUOTA_F_DEPLETED = 0x2 + NFTA_QUOTA_UNSPEC = 0x0 + NFTA_QUOTA_BYTES = 0x1 + NFTA_QUOTA_FLAGS = 0x2 + NFTA_QUOTA_PAD = 0x3 + NFTA_QUOTA_CONSUMED = 0x4 + NFT_REJECT_ICMP_UNREACH = 0x0 + NFT_REJECT_TCP_RST = 0x1 + NFT_REJECT_ICMPX_UNREACH = 0x2 + NFT_REJECT_ICMPX_NO_ROUTE = 0x0 + NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 + NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 + NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 + NFTA_REJECT_UNSPEC = 0x0 + NFTA_REJECT_TYPE = 0x1 + NFTA_REJECT_ICMP_CODE = 0x2 + NFT_NAT_SNAT = 0x0 + NFT_NAT_DNAT = 0x1 + NFTA_NAT_UNSPEC = 0x0 + NFTA_NAT_TYPE = 0x1 + NFTA_NAT_FAMILY = 0x2 + NFTA_NAT_REG_ADDR_MIN = 0x3 + NFTA_NAT_REG_ADDR_MAX = 0x4 + NFTA_NAT_REG_PROTO_MIN = 0x5 + NFTA_NAT_REG_PROTO_MAX = 0x6 + NFTA_NAT_FLAGS = 0x7 + NFTA_MASQ_UNSPEC = 0x0 + NFTA_MASQ_FLAGS = 0x1 + NFTA_MASQ_REG_PROTO_MIN = 0x2 + NFTA_MASQ_REG_PROTO_MAX = 0x3 + NFTA_REDIR_UNSPEC = 0x0 + NFTA_REDIR_REG_PROTO_MIN = 0x1 + NFTA_REDIR_REG_PROTO_MAX = 0x2 + NFTA_REDIR_FLAGS = 0x3 + NFTA_DUP_UNSPEC = 0x0 + NFTA_DUP_SREG_ADDR = 0x1 + NFTA_DUP_SREG_DEV = 0x2 + NFTA_FWD_UNSPEC = 0x0 + NFTA_FWD_SREG_DEV = 0x1 + NFTA_OBJREF_UNSPEC = 0x0 + NFTA_OBJREF_IMM_TYPE = 0x1 + 
NFTA_OBJREF_IMM_NAME = 0x2 + NFTA_OBJREF_SET_SREG = 0x3 + NFTA_OBJREF_SET_NAME = 0x4 + NFTA_OBJREF_SET_ID = 0x5 + NFTA_GEN_UNSPEC = 0x0 + NFTA_GEN_ID = 0x1 + NFTA_GEN_PROC_PID = 0x2 + NFTA_GEN_PROC_NAME = 0x3 + NFTA_FIB_UNSPEC = 0x0 + NFTA_FIB_DREG = 0x1 + NFTA_FIB_RESULT = 0x2 + NFTA_FIB_FLAGS = 0x3 + NFT_FIB_RESULT_UNSPEC = 0x0 + NFT_FIB_RESULT_OIF = 0x1 + NFT_FIB_RESULT_OIFNAME = 0x2 + NFT_FIB_RESULT_ADDRTYPE = 0x3 + NFTA_FIB_F_SADDR = 0x1 + NFTA_FIB_F_DADDR = 0x2 + NFTA_FIB_F_MARK = 0x4 + NFTA_FIB_F_IIF = 0x8 + NFTA_FIB_F_OIF = 0x10 + NFTA_FIB_F_PRESENT = 0x20 + NFTA_CT_HELPER_UNSPEC = 0x0 + NFTA_CT_HELPER_NAME = 0x1 + NFTA_CT_HELPER_L3PROTO = 0x2 + NFTA_CT_HELPER_L4PROTO = 0x3 + NFTA_OBJ_UNSPEC = 0x0 + NFTA_OBJ_TABLE = 0x1 + NFTA_OBJ_NAME = 0x2 + NFTA_OBJ_TYPE = 0x3 + NFTA_OBJ_DATA = 0x4 + NFTA_OBJ_USE = 0x5 + NFTA_TRACE_UNSPEC = 0x0 + NFTA_TRACE_TABLE = 0x1 + NFTA_TRACE_CHAIN = 0x2 + NFTA_TRACE_RULE_HANDLE = 0x3 + NFTA_TRACE_TYPE = 0x4 + NFTA_TRACE_VERDICT = 0x5 + NFTA_TRACE_ID = 0x6 + NFTA_TRACE_LL_HEADER = 0x7 + NFTA_TRACE_NETWORK_HEADER = 0x8 + NFTA_TRACE_TRANSPORT_HEADER = 0x9 + NFTA_TRACE_IIF = 0xa + NFTA_TRACE_IIFTYPE = 0xb + NFTA_TRACE_OIF = 0xc + NFTA_TRACE_OIFTYPE = 0xd + NFTA_TRACE_MARK = 0xe + NFTA_TRACE_NFPROTO = 0xf + NFTA_TRACE_POLICY = 0x10 + NFTA_TRACE_PAD = 0x11 + NFT_TRACETYPE_UNSPEC = 0x0 + NFT_TRACETYPE_POLICY = 0x1 + NFT_TRACETYPE_RETURN = 0x2 + NFT_TRACETYPE_RULE = 0x3 + NFTA_NG_UNSPEC = 0x0 + NFTA_NG_DREG = 0x1 + NFTA_NG_MODULUS = 0x2 + NFTA_NG_TYPE = 0x3 + NFTA_NG_OFFSET = 0x4 + NFT_NG_INCREMENTAL = 0x0 + NFT_NG_RANDOM = 0x1 +) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} + +type BlkpgIoctlArg struct { + Op int32 + Flags int32 + Datalen int32 + _ [4]byte + Data *byte +} + +type BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 + _ [4]byte +} + +const ( + BLKPG = 0x1269 + BLKPG_ADD_PARTITION = 0x1 + BLKPG_DEL_PARTITION = 0x2 + BLKPG_RESIZE_PARTITION = 0x3 +) + +const ( + NETNSA_NONE = 0x0 + NETNSA_NSID = 0x1 + NETNSA_PID = 0x2 + NETNSA_FD = 0x3 +) + +type XDPRingOffset struct { + Producer uint64 + Consumer uint64 + Desc uint64 +} + +type XDPMmapOffsets struct { + Rx XDPRingOffset + Tx XDPRingOffset + Fr XDPRingOffset + Cr XDPRingOffset +} + +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 +} + +type XDPStatistics struct { + Rx_dropped uint64 + Rx_invalid_descs uint64 + Tx_invalid_descs uint64 +} + +type XDPDesc struct { + Addr uint64 + Len uint32 + Options uint32 +} + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 
0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 032a71bb..6b27e16b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -250,6 +250,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -273,6 +280,14 @@ type RawSockaddrVM struct { Zero [4]uint8 } +type RawSockaddrXDP struct { + Family uint16 + Flags uint16 + Ifindex uint32 + Queue_id uint32 + Shared_umem_fd uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -405,9 +420,11 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 + SizeofSockaddrXDP = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -437,6 +454,7 @@ const ( IFLA_ADDRESS = 0x1 IFLA_BROADCAST = 0x2 IFLA_IFNAME = 0x3 + IFLA_INFO_KIND = 0x1 IFLA_MTU = 0x4 IFLA_LINK = 0x5 IFLA_QDISC = 0x6 @@ -505,6 +523,20 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 @@ -728,6 +760,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1839,3 +1873,123 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} + +type BlkpgIoctlArg struct { + Op int32 + Flags int32 + Datalen int32 + _ [4]byte + Data *byte +} + +type BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 + _ [4]byte +} + +const ( + BLKPG = 0x1269 + BLKPG_ADD_PARTITION = 0x1 + BLKPG_DEL_PARTITION = 0x2 + BLKPG_RESIZE_PARTITION = 0x3 +) + +const ( + NETNSA_NONE = 0x0 + NETNSA_NSID = 0x1 + NETNSA_PID = 0x2 + NETNSA_FD = 0x3 +) + +type XDPRingOffset struct { + Producer uint64 + Consumer uint64 + Desc uint64 +} + +type XDPMmapOffsets struct { + Rx XDPRingOffset + Tx XDPRingOffset + Fr XDPRingOffset + Cr XDPRingOffset +} + +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 +} + +type XDPStatistics struct { + Rx_dropped uint64 + Rx_invalid_descs uint64 + Tx_invalid_descs uint64 +} + +type XDPDesc struct { + Addr uint64 + Len uint32 + Options uint32 +} + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + 
+	NCSI_CHANNEL_ATTR_UNSPEC = 0x0
+	NCSI_CHANNEL_ATTR = 0x1
+	NCSI_CHANNEL_ATTR_ID = 0x2
+	NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3
+	NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4
+	NCSI_CHANNEL_ATTR_VERSION_STR = 0x5
+	NCSI_CHANNEL_ATTR_LINK_STATE = 0x6
+	NCSI_CHANNEL_ATTR_ACTIVE = 0x7
+	NCSI_CHANNEL_ATTR_FORCED = 0x8
+	NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9
+	NCSI_CHANNEL_ATTR_VLAN_ID = 0xa
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
index 4b86fb2b..9e9088de 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
@@ -446,3 +446,13 @@ type Utsname struct {
 	Version [256]byte
 	Machine [256]byte
 }
+
+const SizeofClockinfo = 0x14
+
+type Clockinfo struct {
+	Hz int32
+	Tick int32
+	Tickadj int32
+	Stathz int32
+	Profhz int32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
index 9048a509..ed3f1736 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
@@ -453,3 +453,13 @@ type Utsname struct {
 	Version [256]byte
 	Machine [256]byte
 }
+
+const SizeofClockinfo = 0x14
+
+type Clockinfo struct {
+	Hz int32
+	Tick int32
+	Tickadj int32
+	Stathz int32
+	Profhz int32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
index 00525e7b..d263b614 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
@@ -451,3 +451,13 @@ type Utsname struct {
 	Version [256]byte
 	Machine [256]byte
 }
+
+const SizeofClockinfo = 0x14
+
+type Clockinfo struct {
+	Hz int32
+	Tick int32
+	Tickadj int32
+	Stathz int32
+	Profhz int32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
index d5a2d75d..231f4e8e 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
@@ -56,23 +56,6 @@ type Rlimit struct {
 
 type _Gid_t uint32
 
-const (
-	S_IFMT = 0xf000
-	S_IFIFO = 0x1000
-	S_IFCHR = 0x2000
-	S_IFDIR = 0x4000
-	S_IFBLK = 0x6000
-	S_IFREG = 0x8000
-	S_IFLNK = 0xa000
-	S_IFSOCK = 0xc000
-	S_ISUID = 0x800
-	S_ISGID = 0x400
-	S_ISVTX = 0x200
-	S_IRUSR = 0x100
-	S_IWUSR = 0x80
-	S_IXUSR = 0x40
-)
-
 type Stat_t struct {
 	Mode uint32
 	Dev int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
index 5a9c8184..bb2c4488 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
@@ -56,23 +56,6 @@ type Rlimit struct {
 
 type _Gid_t uint32
 
-const (
-	S_IFMT = 0xf000
-	S_IFIFO = 0x1000
-	S_IFCHR = 0x2000
-	S_IFDIR = 0x4000
-	S_IFBLK = 0x6000
-	S_IFREG = 0x8000
-	S_IFLNK = 0xa000
-	S_IFSOCK = 0xc000
-	S_ISUID = 0x800
-	S_ISGID = 0x400
-	S_ISVTX = 0x200
-	S_IRUSR = 0x100
-	S_IWUSR = 0x80
-	S_IXUSR = 0x40
-)
-
 type Stat_t struct {
 	Mode uint32
 	Dev int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
index e35b13b6..941367ca 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
@@ -56,23 +56,6 @@ type Rlimit struct {
 
 type _Gid_t uint32
 
-const (
-	S_IFMT = 0xf000
-	S_IFIFO = 0x1000
-	S_IFCHR = 0x2000
-	S_IFDIR = 0x4000
-	S_IFBLK = 0x6000
-	S_IFREG = 0x8000
-	S_IFLNK = 0xa000
-	S_IFSOCK = 0xc000
-	S_ISUID = 0x800
-	S_ISGID = 0x400
-	S_ISVTX = 0x200
-	S_IRUSR = 0x100
-	S_IWUSR = 0x80
-	S_IXUSR = 0x40
-)
-
 type Stat_t struct {
 	Mode uint32
 	Dev int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
index 2248598d..0543e1a4 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
@@ -75,23 +75,6 @@ type Rlimit struct {
 
 type _Gid_t uint32
 
-const (
-	S_IFMT = 0xf000
-	S_IFIFO = 0x1000
-	S_IFCHR = 0x2000
-	S_IFDIR = 0x4000
-	S_IFBLK = 0x6000
-	S_IFREG = 0x8000
-	S_IFLNK = 0xa000
-	S_IFSOCK = 0xc000
-	S_ISUID = 0x800
-	S_ISGID = 0x400
-	S_ISVTX = 0x200
-	S_IRUSR = 0x100
-	S_IWUSR = 0x80
-	S_IXUSR = 0x40
-)
-
 type Stat_t struct {
 	Dev uint64
 	Ino uint64
diff --git a/vendor/golang.org/x/sys/windows/BUILD.bazel b/vendor/golang.org/x/sys/windows/BUILD.bazel
deleted file mode 100644
index a7944431..00000000
--- a/vendor/golang.org/x/sys/windows/BUILD.bazel
+++ /dev/null
@@ -1,55 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-go_library(
-    name = "go_default_library",
-    srcs = [
-        "mksyscall.go",
-    ] + select({
-        "@io_bazel_rules_go//go/platform:windows": [
-            "dll_windows.go",
-            "env_windows.go",
-            "eventlog.go",
-            "exec_windows.go",
-            "memory_windows.go",
-            "race0.go",
-            "security_windows.go",
-            "service.go",
-            "str.go",
-            "syscall.go",
-            "syscall_windows.go",
-            "types_windows.go",
-            "zsyscall_windows.go",
-        ],
-        "//conditions:default": [],
-    }) + select({
-        "@io_bazel_rules_go//go/platform:windows_386": [
-            "asm_windows_386.s",
-            "types_windows_386.go",
-        ],
-        "@io_bazel_rules_go//go/platform:windows_amd64": [
-            "asm_windows_amd64.s",
-            "types_windows_amd64.go",
-        ],
-        "//conditions:default": [],
-    }),
-    importpath = "golang.org/x/sys/windows",
-    visibility = ["//visibility:public"],
-)
-
-go_test(
-    name = "go_default_xtest",
-    srcs = select({
-        "@io_bazel_rules_go//go/platform:windows": [
-            "syscall_test.go",
-            "syscall_windows_test.go",
-        ],
-        "//conditions:default": [],
-    }),
-    importpath = "golang.org/x/sys/windows_test",
-    deps = select({
-        "@io_bazel_rules_go//go/platform:windows": [
-            ":go_default_library",
-        ],
-        "//conditions:default": [],
-    }),
-)
diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go
new file mode 100644
index 00000000..af3af60d
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/aliases.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+// +build go1.9
+
+package windows
+
+import "syscall"
+
+type Errno = syscall.Errno
+type SysProcAttr = syscall.SysProcAttr
diff --git a/vendor/golang.org/x/sys/windows/asm_windows_arm.s b/vendor/golang.org/x/sys/windows/asm_windows_arm.s
new file mode 100644
index 00000000..55d8b91a
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/asm_windows_arm.s
@@ -0,0 +1,11 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT ·getprocaddress(SB),NOSPLIT,$0
+	B syscall·getprocaddress(SB)
+
+TEXT ·loadlibrary(SB),NOSPLIT,$0
+	B syscall·loadlibrary(SB)
diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go
index 24aa90bb..62fc31b4 100644
--- a/vendor/golang.org/x/sys/windows/service.go
+++ b/vendor/golang.org/x/sys/windows/service.go
@@ -43,6 +43,11 @@ const (
 
 	SC_STATUS_PROCESS_INFO = 0
 
+	SC_ACTION_NONE = 0
+	SC_ACTION_RESTART = 1
+	SC_ACTION_REBOOT = 2
+	SC_ACTION_RUN_COMMAND = 3
+
 	SERVICE_STOPPED = 1
 	SERVICE_START_PENDING = 2
 	SERVICE_STOP_PENDING = 3
@@ -148,6 +153,19 @@ type ENUM_SERVICE_STATUS_PROCESS struct {
 	ServiceStatusProcess SERVICE_STATUS_PROCESS
 }
 
+type SERVICE_FAILURE_ACTIONS struct {
+	ResetPeriod uint32
+	RebootMsg *uint16
+	Command *uint16
+	ActionsCount uint32
+	Actions *SC_ACTION
+}
+
+type SC_ACTION struct {
+	Type uint32
+	Delay uint32
+}
+
 //sys	CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle
 //sys	CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW
 //sys	OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW
diff --git a/vendor/golang.org/x/sys/windows/syscall_test.go b/vendor/golang.org/x/sys/windows/syscall_test.go
deleted file mode 100644
index d7009e44..00000000
--- a/vendor/golang.org/x/sys/windows/syscall_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows
-
-package windows_test
-
-import (
-	"syscall"
-	"testing"
-
-	"golang.org/x/sys/windows"
-)
-
-func testSetGetenv(t *testing.T, key, value string) {
-	err := windows.Setenv(key, value)
-	if err != nil {
-		t.Fatalf("Setenv failed to set %q: %v", value, err)
-	}
-	newvalue, found := windows.Getenv(key)
-	if !found {
-		t.Fatalf("Getenv failed to find %v variable (want value %q)", key, value)
-	}
-	if newvalue != value {
-		t.Fatalf("Getenv(%v) = %q; want %q", key, newvalue, value)
-	}
-}
-
-func TestEnv(t *testing.T) {
-	testSetGetenv(t, "TESTENV", "AVALUE")
-	// make sure TESTENV gets set to "", not deleted
-	testSetGetenv(t, "TESTENV", "")
-}
-
-func TestGetProcAddressByOrdinal(t *testing.T) {
-	// Attempt calling shlwapi.dll:IsOS, resolving it by ordinal, as
-	// suggested in
-	// https://msdn.microsoft.com/en-us/library/windows/desktop/bb773795.aspx
-	h, err := windows.LoadLibrary("shlwapi.dll")
-	if err != nil {
-		t.Fatalf("Failed to load shlwapi.dll: %s", err)
-	}
-	procIsOS, err := windows.GetProcAddressByOrdinal(h, 437)
-	if err != nil {
-		t.Fatalf("Could not find shlwapi.dll:IsOS by ordinal: %s", err)
-	}
-	const OS_NT = 1
-	r, _, _ := syscall.Syscall(procIsOS, 1, OS_NT, 0, 0)
-	if r == 0 {
-		t.Error("shlwapi.dll:IsOS(OS_NT) returned 0, expected non-zero value")
-	}
-}
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 1e9f4bb4..8a00b71f 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -112,12 +112,14 @@ func Getpagesize() int { return 4096 }
 
 // NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention.
 // This is useful when interoperating with Windows code requiring callbacks.
+// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
 func NewCallback(fn interface{}) uintptr {
 	return syscall.NewCallback(fn)
 }
 
 // NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention.
 // This is useful when interoperating with Windows code requiring callbacks.
+// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
 func NewCallbackCDecl(fn interface{}) uintptr {
 	return syscall.NewCallbackCDecl(fn)
 }
@@ -653,7 +655,7 @@ type RawSockaddr struct {
 
 type RawSockaddrAny struct {
 	Addr RawSockaddr
-	Pad [96]int8
+	Pad [100]int8
 }
 
 type Sockaddr interface {
@@ -702,19 +704,69 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, int32, error) {
 	return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil
 }
 
+type RawSockaddrUnix struct {
+	Family uint16
+	Path [UNIX_PATH_MAX]int8
+}
+
 type SockaddrUnix struct {
 	Name string
+	raw RawSockaddrUnix
 }
 
 func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) {
-	// TODO(brainman): implement SockaddrUnix.sockaddr()
-	return nil, 0, syscall.EWINDOWS
+	name := sa.Name
+	n := len(name)
+	if n > len(sa.raw.Path) {
+		return nil, 0, syscall.EINVAL
+	}
+	if n == len(sa.raw.Path) && name[0] != '@' {
+		return nil, 0, syscall.EINVAL
+	}
+	sa.raw.Family = AF_UNIX
+	for i := 0; i < n; i++ {
+		sa.raw.Path[i] = int8(name[i])
+	}
+	// length is family (uint16), name, NUL.
+	sl := int32(2)
+	if n > 0 {
+		sl += int32(n) + 1
+	}
+	if sa.raw.Path[0] == '@' {
+		sa.raw.Path[0] = 0
+		// Don't count trailing NUL for abstract address.
+		sl--
+	}
+
+	return unsafe.Pointer(&sa.raw), sl, nil
 }
 
 func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) {
 	switch rsa.Addr.Family {
 	case AF_UNIX:
-		return nil, syscall.EWINDOWS
+		pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa))
+		sa := new(SockaddrUnix)
+		if pp.Path[0] == 0 {
+			// "Abstract" Unix domain socket.
+			// Rewrite leading NUL as @ for textual display.
+			// (This is the standard convention.)
+			// Not friendly to overwrite in place,
+			// but the callers below don't care.
+			pp.Path[0] = '@'
+		}
+
+		// Assume path ends at NUL.
+		// This is not technically the Linux semantics for
+		// abstract Unix domain sockets--they are supposed
+		// to be uninterpreted fixed-size binary blobs--but
+		// everyone uses this convention.
+		n := 0
+		for n < len(pp.Path) && pp.Path[n] != 0 {
+			n++
+		}
+		bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
+		sa.Name = string(bytes)
+		return sa, nil
 	case AF_INET:
 		pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows_test.go b/vendor/golang.org/x/sys/windows/syscall_windows_test.go
deleted file mode 100644
index 0e27464e..00000000
--- a/vendor/golang.org/x/sys/windows/syscall_windows_test.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package windows_test
-
-import (
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"syscall"
-	"testing"
-	"unsafe"
-
-	"golang.org/x/sys/windows"
-)
-
-func TestWin32finddata(t *testing.T) {
-	dir, err := ioutil.TempDir("", "go-build")
-	if err != nil {
-		t.Fatalf("failed to create temp directory: %v", err)
-	}
-	defer os.RemoveAll(dir)
-
-	path := filepath.Join(dir, "long_name.and_extension")
-	f, err := os.Create(path)
-	if err != nil {
-		t.Fatalf("failed to create %v: %v", path, err)
-	}
-	f.Close()
-
-	type X struct {
-		fd windows.Win32finddata
-		got byte
-		pad [10]byte // to protect ourselves
-
-	}
-	var want byte = 2 // it is unlikely to have this character in the filename
-	x := X{got: want}
-
-	pathp, _ := windows.UTF16PtrFromString(path)
-	h, err := windows.FindFirstFile(pathp, &(x.fd))
-	if err != nil {
-		t.Fatalf("FindFirstFile failed: %v", err)
-	}
-	err = windows.FindClose(h)
-	if err != nil {
-		t.Fatalf("FindClose failed: %v", err)
-	}
-
-	if x.got != want {
-		t.Fatalf("memory corruption: want=%d got=%d", want, x.got)
-	}
-}
-
-func TestFormatMessage(t *testing.T) {
-	dll := windows.MustLoadDLL("pdh.dll")
-
-	pdhOpenQuery := func(datasrc *uint16, userdata uint32, query *windows.Handle) (errno uintptr) {
-		r0, _, _ := syscall.Syscall(dll.MustFindProc("PdhOpenQueryW").Addr(), 3, uintptr(unsafe.Pointer(datasrc)), uintptr(userdata), uintptr(unsafe.Pointer(query)))
-		return r0
-	}
-
-	pdhCloseQuery := func(query windows.Handle) (errno uintptr) {
-		r0, _, _ := syscall.Syscall(dll.MustFindProc("PdhCloseQuery").Addr(), 1, uintptr(query), 0, 0)
-		return r0
-	}
-
-	var q windows.Handle
-	name, err := windows.UTF16PtrFromString("no_such_source")
-	if err != nil {
-		t.Fatal(err)
-	}
-	errno := pdhOpenQuery(name, 0, &q)
-	if errno == 0 {
-		pdhCloseQuery(q)
-		t.Fatal("PdhOpenQuery succeeded, but expected to fail.")
-	}
-
-	const flags uint32 = syscall.FORMAT_MESSAGE_FROM_HMODULE | syscall.FORMAT_MESSAGE_ARGUMENT_ARRAY | syscall.FORMAT_MESSAGE_IGNORE_INSERTS
-	buf := make([]uint16, 300)
-	_, err = windows.FormatMessage(flags, uintptr(dll.Handle), uint32(errno), 0, buf, nil)
-	if err != nil {
-		t.Fatalf("FormatMessage for handle=%x and errno=%x failed: %v", dll.Handle, errno, err)
-	}
-}
-
-func abort(funcname string, err error) {
-	panic(funcname + " failed: " + err.Error())
-}
-
-func ExampleLoadLibrary() {
-	h, err := windows.LoadLibrary("kernel32.dll")
-	if err != nil {
-		abort("LoadLibrary", err)
-	}
-	defer windows.FreeLibrary(h)
-	proc, err := windows.GetProcAddress(h, "GetVersion")
-	if err != nil {
-		abort("GetProcAddress", err)
-	}
-	r, _, _ := syscall.Syscall(uintptr(proc), 0, 0, 0, 0)
-	major := byte(r)
-	minor := uint8(r >> 8)
-	build := uint16(r >> 16)
-	print("windows version ", major, ".", minor, " (Build ", build, ")\n")
-}
-
-func TestTOKEN_ALL_ACCESS(t *testing.T) {
-	if windows.TOKEN_ALL_ACCESS != 0xF01FF {
-		t.Errorf("TOKEN_ALL_ACCESS = %x, want 0xF01FF", windows.TOKEN_ALL_ACCESS)
-	}
-}
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index 52c2037b..141ca81b 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -94,16 +94,29 @@ const (
 	FILE_APPEND_DATA = 0x00000004
 	FILE_WRITE_ATTRIBUTES = 0x00000100
 
-	FILE_SHARE_READ = 0x00000001
-	FILE_SHARE_WRITE = 0x00000002
-	FILE_SHARE_DELETE = 0x00000004
-	FILE_ATTRIBUTE_READONLY = 0x00000001
-	FILE_ATTRIBUTE_HIDDEN = 0x00000002
-	FILE_ATTRIBUTE_SYSTEM = 0x00000004
-	FILE_ATTRIBUTE_DIRECTORY = 0x00000010
-	FILE_ATTRIBUTE_ARCHIVE = 0x00000020
-	FILE_ATTRIBUTE_NORMAL = 0x00000080
-	FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400
+	FILE_SHARE_READ = 0x00000001
+	FILE_SHARE_WRITE = 0x00000002
+	FILE_SHARE_DELETE = 0x00000004
+
+	FILE_ATTRIBUTE_READONLY = 0x00000001
+	FILE_ATTRIBUTE_HIDDEN = 0x00000002
+	FILE_ATTRIBUTE_SYSTEM = 0x00000004
+	FILE_ATTRIBUTE_DIRECTORY = 0x00000010
+	FILE_ATTRIBUTE_ARCHIVE = 0x00000020
+	FILE_ATTRIBUTE_DEVICE = 0x00000040
+	FILE_ATTRIBUTE_NORMAL = 0x00000080
+	FILE_ATTRIBUTE_TEMPORARY = 0x00000100
+	FILE_ATTRIBUTE_SPARSE_FILE = 0x00000200
+	FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400
+	FILE_ATTRIBUTE_COMPRESSED = 0x00000800
+	FILE_ATTRIBUTE_OFFLINE = 0x00001000
+	FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x00002000
+	FILE_ATTRIBUTE_ENCRYPTED = 0x00004000
+	FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x00008000
+	FILE_ATTRIBUTE_VIRTUAL = 0x00010000
+	FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x00020000
+	FILE_ATTRIBUTE_RECALL_ON_OPEN = 0x00040000
+	FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS = 0x00400000
 
 	INVALID_FILE_ATTRIBUTES = 0xffffffff
@@ -257,15 +270,87 @@ const (
 	USAGE_MATCH_TYPE_AND = 0
 	USAGE_MATCH_TYPE_OR = 1
 
+	/* msgAndCertEncodingType values for CertOpenStore function */
 	X509_ASN_ENCODING = 0x00000001
 	PKCS_7_ASN_ENCODING = 0x00010000
 
-	CERT_STORE_PROV_MEMORY = 2
-
-	CERT_STORE_ADD_ALWAYS = 4
-
+	/* storeProvider values for CertOpenStore function */
+	CERT_STORE_PROV_MSG = 1
+	CERT_STORE_PROV_MEMORY = 2
+	CERT_STORE_PROV_FILE = 3
+	CERT_STORE_PROV_REG = 4
+	CERT_STORE_PROV_PKCS7 = 5
+	CERT_STORE_PROV_SERIALIZED = 6
+	CERT_STORE_PROV_FILENAME_A = 7
+	CERT_STORE_PROV_FILENAME_W = 8
+	CERT_STORE_PROV_FILENAME = CERT_STORE_PROV_FILENAME_W
+	CERT_STORE_PROV_SYSTEM_A = 9
+	CERT_STORE_PROV_SYSTEM_W = 10
+	CERT_STORE_PROV_SYSTEM = CERT_STORE_PROV_SYSTEM_W
+	CERT_STORE_PROV_COLLECTION = 11
+	CERT_STORE_PROV_SYSTEM_REGISTRY_A = 12
+	CERT_STORE_PROV_SYSTEM_REGISTRY_W = 13
+	CERT_STORE_PROV_SYSTEM_REGISTRY = CERT_STORE_PROV_SYSTEM_REGISTRY_W
+	CERT_STORE_PROV_PHYSICAL_W = 14
+	CERT_STORE_PROV_PHYSICAL = CERT_STORE_PROV_PHYSICAL_W
+	CERT_STORE_PROV_SMART_CARD_W = 15
+	CERT_STORE_PROV_SMART_CARD = CERT_STORE_PROV_SMART_CARD_W
+	CERT_STORE_PROV_LDAP_W = 16
+	CERT_STORE_PROV_LDAP = CERT_STORE_PROV_LDAP_W
+	CERT_STORE_PROV_PKCS12 = 17
+
+	/* store characteristics (low WORD of flag) for CertOpenStore function */
+	CERT_STORE_NO_CRYPT_RELEASE_FLAG = 0x00000001
+	CERT_STORE_SET_LOCALIZED_NAME_FLAG = 0x00000002
 	CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG = 0x00000004
-
+	CERT_STORE_DELETE_FLAG = 0x00000010
+	CERT_STORE_UNSAFE_PHYSICAL_FLAG = 0x00000020
+	CERT_STORE_SHARE_STORE_FLAG = 0x00000040
+	CERT_STORE_SHARE_CONTEXT_FLAG = 0x00000080
+	CERT_STORE_MANIFOLD_FLAG = 0x00000100
+	CERT_STORE_ENUM_ARCHIVED_FLAG = 0x00000200
+	CERT_STORE_UPDATE_KEYID_FLAG = 0x00000400
+	CERT_STORE_BACKUP_RESTORE_FLAG = 0x00000800
+	CERT_STORE_MAXIMUM_ALLOWED_FLAG = 0x00001000
+	CERT_STORE_CREATE_NEW_FLAG = 0x00002000
+	CERT_STORE_OPEN_EXISTING_FLAG = 0x00004000
+	CERT_STORE_READONLY_FLAG = 0x00008000
+
+	/* store locations (high WORD of flag) for CertOpenStore function */
+	CERT_SYSTEM_STORE_CURRENT_USER = 0x00010000
+	CERT_SYSTEM_STORE_LOCAL_MACHINE = 0x00020000
+	CERT_SYSTEM_STORE_CURRENT_SERVICE = 0x00040000
+	CERT_SYSTEM_STORE_SERVICES = 0x00050000
+	CERT_SYSTEM_STORE_USERS = 0x00060000
+	CERT_SYSTEM_STORE_CURRENT_USER_GROUP_POLICY = 0x00070000
+	CERT_SYSTEM_STORE_LOCAL_MACHINE_GROUP_POLICY = 0x00080000
+	CERT_SYSTEM_STORE_LOCAL_MACHINE_ENTERPRISE = 0x00090000
+	CERT_SYSTEM_STORE_UNPROTECTED_FLAG = 0x40000000
+	CERT_SYSTEM_STORE_RELOCATE_FLAG = 0x80000000
+
+	/* Miscellaneous high-WORD flags for CertOpenStore function */
+	CERT_REGISTRY_STORE_REMOTE_FLAG = 0x00010000
+	CERT_REGISTRY_STORE_SERIALIZED_FLAG = 0x00020000
+	CERT_REGISTRY_STORE_ROAMING_FLAG = 0x00040000
+	CERT_REGISTRY_STORE_MY_IE_DIRTY_FLAG = 0x00080000
+	CERT_REGISTRY_STORE_LM_GPT_FLAG = 0x01000000
+	CERT_REGISTRY_STORE_CLIENT_GPT_FLAG = 0x80000000
+	CERT_FILE_STORE_COMMIT_ENABLE_FLAG = 0x00010000
+	CERT_LDAP_STORE_SIGN_FLAG = 0x00010000
+	CERT_LDAP_STORE_AREC_EXCLUSIVE_FLAG = 0x00020000
+	CERT_LDAP_STORE_OPENED_FLAG = 0x00040000
+	CERT_LDAP_STORE_UNBIND_FLAG = 0x00080000
+
+	/* addDisposition values for CertAddCertificateContextToStore function */
+	CERT_STORE_ADD_NEW = 1
+	CERT_STORE_ADD_USE_EXISTING = 2
+	CERT_STORE_ADD_REPLACE_EXISTING = 3
+	CERT_STORE_ADD_ALWAYS = 4
+	CERT_STORE_ADD_REPLACE_EXISTING_INHERIT_PROPERTIES = 5
+	CERT_STORE_ADD_NEWER = 6
+	CERT_STORE_ADD_NEWER_INHERIT_PROPERTIES = 7
+
+	/* ErrorStatus values for CertTrustStatus struct */
 	CERT_TRUST_NO_ERROR = 0x00000000
 	CERT_TRUST_IS_NOT_TIME_VALID = 0x00000001
 	CERT_TRUST_IS_REVOKED = 0x00000004
@@ -282,11 +367,31 @@ const (
 	CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT = 0x00002000
 	CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT = 0x00004000
 	CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT = 0x00008000
+	CERT_TRUST_IS_PARTIAL_CHAIN = 0x00010000
+	CERT_TRUST_CTL_IS_NOT_TIME_VALID = 0x00020000
+	CERT_TRUST_CTL_IS_NOT_SIGNATURE_VALID = 0x00040000
+	CERT_TRUST_CTL_IS_NOT_VALID_FOR_USAGE = 0x00080000
+	CERT_TRUST_HAS_WEAK_SIGNATURE = 0x00100000
 	CERT_TRUST_IS_OFFLINE_REVOCATION = 0x01000000
 	CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY = 0x02000000
 	CERT_TRUST_IS_EXPLICIT_DISTRUST = 0x04000000
 	CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT = 0x08000000
 
+	/* InfoStatus values for CertTrustStatus struct */
+	CERT_TRUST_HAS_EXACT_MATCH_ISSUER = 0x00000001
+	CERT_TRUST_HAS_KEY_MATCH_ISSUER = 0x00000002
+	CERT_TRUST_HAS_NAME_MATCH_ISSUER = 0x00000004
+	CERT_TRUST_IS_SELF_SIGNED = 0x00000008
+	CERT_TRUST_HAS_PREFERRED_ISSUER = 0x00000100
+	CERT_TRUST_HAS_ISSUANCE_CHAIN_POLICY = 0x00000400
+	CERT_TRUST_HAS_VALID_NAME_CONSTRAINTS = 0x00000400
+	CERT_TRUST_IS_PEER_TRUSTED = 0x00000800
+	CERT_TRUST_HAS_CRL_VALIDITY_EXTENDED = 0x00001000
+	CERT_TRUST_IS_FROM_EXCLUSIVE_TRUST_STORE = 0x00002000
+	CERT_TRUST_IS_CA_TRUSTED = 0x00004000
+	CERT_TRUST_IS_COMPLEX_CHAIN = 0x00010000
+
+	/* policyOID values for CertVerifyCertificateChainPolicy function */
 	CERT_CHAIN_POLICY_BASE = 1
 	CERT_CHAIN_POLICY_AUTHENTICODE = 2
 	CERT_CHAIN_POLICY_AUTHENTICODE_TS = 3
@@ -295,6 +400,7 @@ const (
 	CERT_CHAIN_POLICY_NT_AUTH = 6
 	CERT_CHAIN_POLICY_MICROSOFT_ROOT = 7
 	CERT_CHAIN_POLICY_EV = 8
+	CERT_CHAIN_POLICY_SSL_F12 = 9
 
 	CERT_E_EXPIRED = 0x800B0101
 	CERT_E_ROLE = 0x800B0103
@@ -302,8 +408,16 @@ const (
 	CERT_E_UNTRUSTEDROOT = 0x800B0109
 	CERT_E_CN_NO_MATCH = 0x800B010F
 
+	/* AuthType values for SSLExtraCertChainPolicyPara struct */
 	AUTHTYPE_CLIENT = 1
 	AUTHTYPE_SERVER = 2
+
+	/* Checks values for SSLExtraCertChainPolicyPara struct */
+	SECURITY_FLAG_IGNORE_REVOCATION = 0x00000080
+	SECURITY_FLAG_IGNORE_UNKNOWN_CA = 0x00000100
+	SECURITY_FLAG_IGNORE_WRONG_USAGE = 0x00000200
+	SECURITY_FLAG_IGNORE_CERT_CN_INVALID = 0x00001000
+	SECURITY_FLAG_IGNORE_CERT_DATE_INVALID = 0x00002000
 )
 
 var (
@@ -312,6 +426,14 @@ var (
 	OID_SGC_NETSCAPE = []byte("2.16.840.1.113730.4.1\x00")
 )
 
+// Pointer represents a pointer to an arbitrary Windows type.
+//
+// Pointer-typed fields may point to one of many different types. It's
+// up to the caller to provide a pointer to the appropriate type, cast
+// to Pointer. The caller must obey the unsafe.Pointer rules while
+// doing so.
+type Pointer *struct{}
+
 // Invented values to support what package os expects.
 type Timeval struct {
 	Sec int32
@@ -880,11 +1002,15 @@ type MibIfRow struct {
 	Descr [MAXLEN_IFDESCR]byte
 }
 
+type CertInfo struct {
+	// Not implemented
+}
+
 type CertContext struct {
 	EncodingType uint32
 	EncodedCert *byte
 	Length uint32
-	CertInfo uintptr
+	CertInfo *CertInfo
 	Store Handle
 }
 
@@ -899,12 +1025,16 @@ type CertChainContext struct {
 	RevocationFreshnessTime uint32
 }
 
+type CertTrustListInfo struct {
+	// Not implemented
+}
+
 type CertSimpleChain struct {
 	Size uint32
 	TrustStatus CertTrustStatus
 	NumElements uint32
 	Elements **CertChainElement
-	TrustListInfo uintptr
+	TrustListInfo *CertTrustListInfo
 	HasRevocationFreshnessTime uint32
 	RevocationFreshnessTime uint32
 }
@@ -919,14 +1049,18 @@ type CertChainElement struct {
 	ExtendedErrorInfo *uint16
 }
 
+type CertRevocationCrlInfo struct {
+	// Not implemented
+}
+
 type CertRevocationInfo struct {
 	Size uint32
 	RevocationResult uint32
 	RevocationOid *byte
-	OidSpecificInfo uintptr
+	OidSpecificInfo Pointer
 	HasFreshnessTime uint32
 	FreshnessTime uint32
-	CrlInfo uintptr // *CertRevocationCrlInfo
+	CrlInfo *CertRevocationCrlInfo
 }
 
 type CertTrustStatus struct {
@@ -957,7 +1091,7 @@ type CertChainPara struct {
 
 type CertChainPolicyPara struct {
 	Size uint32
 	Flags uint32
-	ExtraPolicyPara uintptr
+	ExtraPolicyPara Pointer
 }
 
 type SSLExtraCertChainPolicyPara struct {
@@ -972,7 +1106,7 @@ type CertChainPolicyStatus struct {
 	Error uint32
 	ChainIndex uint32
 	ElementIndex uint32
-	ExtraPolicyStatus uintptr
+	ExtraPolicyStatus Pointer
 }
 
 const (
@@ -1319,7 +1453,7 @@ type SmallRect struct {
 	Bottom int16
 }
 
-// Used with GetConsoleScreenBuffer to retreive information about a console
+// Used with GetConsoleScreenBuffer to retrieve information about a console
 // screen buffer. See
 // https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str
 // for details.
@@ -1331,3 +1465,5 @@ type ConsoleScreenBufferInfo struct {
 	Window SmallRect
 	MaximumWindowSize Coord
 }
+
+const UNIX_PATH_MAX = 108 // defined in afunix.h
diff --git a/vendor/golang.org/x/sys/windows/types_windows_arm.go b/vendor/golang.org/x/sys/windows/types_windows_arm.go
new file mode 100644
index 00000000..74571e36
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/types_windows_arm.go
@@ -0,0 +1,22 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+type WSAData struct {
+	Version uint16
+	HighVersion uint16
+	Description [WSADESCRIPTION_LEN + 1]byte
+	SystemStatus [WSASYS_STATUS_LEN + 1]byte
+	MaxSockets uint16
+	MaxUdpDg uint16
+	VendorInfo *byte
+}
+
+type Servent struct {
+	Name *byte
+	Aliases **byte
+	Port uint16
+	Proto *byte
+}
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 318c6163..fc56aec0 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -1,4 +1,4 @@
-// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
+// Code generated by 'go generate'; DO NOT EDIT.
 
 package windows