diff --git a/.dockerignore b/.dockerignore
index ad4a9beb..34f27cd6 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,9 +1,7 @@
 .git
 dist
-.dockerignore
-.rockertmp*
-Rockerfile
+**/.a
 pkg
 vendor/pkg
-Rockerfile.exec
-Rockerfile.build-cross
+example
+rsync
diff --git a/Makefile b/Makefile
index a5061766..f1093c3d 100644
--- a/Makefile
+++ b/Makefile
@@ -36,10 +36,17 @@ UPLOAD_CMD = $(GITHUB_RELEASE) upload \
 SRCS = $(shell find . -name '*.go' | grep -v '^./vendor/')
 PKGS := $(foreach pkg, $(sort $(dir $(SRCS))), $(pkg))
 
-GOPATH ?= $(shell pwd):$(shell pwd)/vendor
-
 TESTARGS ?=
 
+binary:
+	GOPATH=$(shell pwd):$(shell pwd)/vendor go build \
+		-ldflags "-X main.Version=$(VERSION) -X main.GitCommit=$(GITCOMMIT) -X main.GitBranch=$(GITBRANCH) -X main.BuildTime=$(BUILDTIME)" \
+		-v -o bin/rocker src/cmd/rocker/main.go
+
+install:
+	cp bin/rocker /usr/local/bin/rocker
+	chmod +x /usr/local/bin/rocker
+
 all: $(ALL_BINARIES)
 	$(foreach BIN, $(BINARIES), $(shell cp dist/$(VERSION)/$(shell go env GOOS)/amd64/$(BIN) dist/$(BIN)))
 
@@ -75,18 +82,9 @@ build_image:
 docker_image:
 	rocker build -var Version=$(VERSION)
 
-install:
-	cp dist/$(VERSION)/$(shell go env GOOS)/amd64/rocker /usr/local/bin/rocker
-	chmod +x /usr/local/bin/rocker
-
 clean:
 	rm -Rf dist
 
-local_binary:
-	go build \
-		-ldflags "-X main.Version=$(VERSION) -X main.GitCommit=$(GITCOMMIT) -X main.GitBranch=$(GITBRANCH) -X main.BuildTime=$(BUILDTIME)" \
-		-v -o bin/rocker src/cmd/rocker/main.go
-
 testdeps:
 	@ go get github.com/GeertJohan/fgt
 	@ go get github.com/constabulary/gb/...
diff --git a/README.md b/README.md
index e53380d2..5e63a97a 100644
--- a/README.md
+++ b/README.md
@@ -2,6 +2,21 @@
 Rocker breaks the limits of Dockerfile. It adds some crucial features that are missing while keeping Docker’s original design and idea. Read the [blog post](http://tech.grammarly.com/blog/posts/Making-Docker-Rock-at-Grammarly.html) about how and why it was invented.
 
+# *v1 NOTE*
+Rocker has been rewritten from scratch and is now much more robust! While [dockramp](https://github.com/jlhawn/dockramp) is a proof of concept of a client-driven Docker builder, Rocker is a full-featured implementation.
+
+1. There are no context uploads or fallbacks to `docker build`. This makes your builds faster, especially if you have a big project.
+2. Cache lookup works much faster than Docker's implementation when you have thousands of layers.
+3. Better output: rocker reports the size of each produced layer, so you can see which steps take up space.
+4. Works with Docker >= 1.8
+
+What is not supported yet:
+
+1. `ADD `
+2. Adding tar archives that are supposed to be automatically extracted
+
+---
+
 * [Installation](#installation)
 * [Rockerfile](#rockerfile)
   * [MOUNT](#mount)
@@ -10,8 +25,6 @@ Rocker breaks the limits of Dockerfile. It adds some crucial features that are m
 * [TAG](#tag)
 * [PUSH](#push)
 * [Templating](#templating)
-  * [REQUIRE](#require)
-  * [INCLUDE](#include)
 * [ATTACH](#attach)
 * [Where to go next?](#where-to-go-next)
 * [Contributing](#contributing)
@@ -35,7 +48,16 @@ Go to the [releases](https://github.com/grammarly/rocker/releases) section and d
 Something like this:
 
 ```bash
-curl -SL https://github.com/grammarly/rocker/releases/download/0.2.2/rocker-0.2.2_darwin_amd64.tar.gz | tar -xzC /usr/local/bin && chmod +x /usr/local/bin/rocker
+curl -SL https://github.com/grammarly/rocker/releases/download/1.0.0/rocker-1.0.0_darwin_amd64.tar.gz | tar -xzC /usr/local/bin && chmod +x /usr/local/bin/rocker
+```
+
+### Building locally
+
+`make` will produce the `bin/rocker` binary; `make install` will then copy it to `/usr/local/bin/rocker`.
+
+```bash
+make
+make install
 ```
 
 ### Getting help, usage:
@@ -63,7 +85,9 @@ The most challenging part is caching. While implementing those features seems to
 
 ### How does it work
 
-Rocker parses the Rockerfile into an AST using the same library Docker uses for parsing Dockerfiles. Then it goes through the instructions and makes a decision, should it execute a command on its own or delegate it to Docker. Internally, Rocker splits a Rockerfile into slices, some of them are executed through Docker’s remote API, some are sent as regular Dockerfiles underneath. This allows to not reimplement the whole thing — only add custom stuff. So if you have a plain Dockerfile, Rocker will not find any custom commands, so it will just pass it straight to Docker.
+Rocker parses the Rockerfile into an AST using the same library Docker uses for parsing Dockerfiles. Then it builds a [plan](/src/rocker/build/plan.go) out of the instructions and yields a list of commands. For every command there is a function in [commands.go](/src/rocker/build/commands.go), though in the future we will make it extensible.
+
+More detailed documentation of the internals will come later.
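+Until then, here is roughly how the pieces fit together. This is an editorial sketch condensed from `src/cmd/rocker/main.go` in this change: the `buildOnce` helper is hypothetical, error handling and most `build.Config` options are omitted, but the names are the ones used in this release.
+
+```go
+import (
+	"rocker/build"
+	"rocker/template"
+
+	"github.com/fsouza/go-dockerclient"
+)
+
+func buildOnce(dockerClient *docker.Client, rockerfilePath, contextDir string) error {
+	// Parse (and template) the Rockerfile into a list of commands
+	rockerfile, err := build.NewRockerfileFromFile(rockerfilePath, template.Vars{}, template.Funs{})
+	if err != nil {
+		return err
+	}
+	client := build.NewDockerClient(dockerClient, docker.AuthConfiguration{})
+	builder := build.New(client, rockerfile, nil, build.Config{ // nil cache behaves like --no-cache
+		ContextDir: contextDir,
+	})
+	// Build a plan out of the parsed commands, then execute it step by step
+	plan, err := build.NewPlan(rockerfile.Commands(), true)
+	if err != nil {
+		return err
+	}
+	return builder.Run(plan)
+}
+```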
 
 # MOUNT
 
@@ -78,10 +102,6 @@
 ```bash
 MOUNT /root/.gem /usr/local/bundle
 ```
 or
 ```bash
 MOUNT .:/src
 ```
-or
-```bash
-MOUNT $GIT_SSH_KEY:/root/.ssh/id_rsa
-```
 
 `MOUNT` is used to share volumes between builds, so they can be reused by tools like dependency management. There are two types of mounts:
@@ -90,20 +110,20 @@
 
 Volume container names are hashed with Rockerfile’s full path and the directories it shares. So as long as your Rockerfile has the same name and it is in the same place — same volume containers will be used.
 
-Note that Rocker is not tracking changes in mounted directories, so no changes can affect caching. Cache will be busted only if you change list of mounts, add or remove them. In future, we may add some configuration flags, so you can specify if you want to watch the actual mount contents changes, and make them invalidate the cache (for example $GIT_SSH_KEY contents may change).
+Note that Rocker does not track changes in mounted directories, so nothing that changes inside a mount can affect caching. The cache is busted only if you change the list of mounts itself, by adding or removing them. In the future, we may add configuration flags so you can choose to watch the actual mount contents for changes and have them invalidate the cache.
 
-To force cache invalidation you can always use `--no-cache` flag for `rocker build` command. But you will then need a lot of patience.
+To force cache invalidation you can always use the `--no-cache` or `--reload-cache` flag of the `rocker build` command. But you will then need a lot of patience.
 
 **Example usage**
 
 ```bash
 FROM grammarly/nodejs:latest
-ADD . 
/src #1 WORKDIR /src -MOUNT /src/node_modules /src/bower_components #2 -MOUNT $GIT_SSH_KEY:/root/.ssh/id_rsa #3 -RUN npm install #4 -RUN cp -R /src /app #5 +MOUNT /src/node_modules /src/bower_components #2 +MOUNT {{ .Env.GIT_SSH_KEY }}:/root/.ssh/id_rsa #3 +RUN npm install #4 +RUN cp -R /src /app #5 WORKDIR /app CMD ["/usr/bin/node", "index.js"] ``` @@ -327,66 +347,6 @@ CMD ["/bin/rocker"] PUSH grammarly/rocker:0.1.22 ``` -# REQUIRE - -```bash -REQUIRE foo -``` -or -```bash -REQUIRE ["foo", "bar"] -``` - -Useful when you use variables, for example for image name or tag (as shown above). In such case, you should specify the variable because otherwise the build doesn't make sense. - -`REQUIRE` does not affect the cache and it doesn't produce any layers. - -**Usage** -```bash -FROM google/golang:1.4 -… -CMD ["/bin/rocker"] -REQUIRE Version -PUSH grammarly/rocker:{{ .Version }} -``` - -So if we run the build not specifying the version variable (like `-var "Version=123"`), it will fail -```bash -$ rocker build -... -Error: Var $Version is required but not set -``` - -# INCLUDE - -```bash -INCLUDE path/to/mixin -``` -or -```bash -INCLUDE ../../path/to/mixin -``` - -Adds ability to include other Dockerfiles or Rockerfiles into your file. Useful if you have some collections of mixins on the side, such as a recipe to install nodejs or python, and want to use them. - -1. Paths passed to `INCLUDE` are relative to the Rockerfile's directory. -2. It is not allowed to nest includes, e.g. use `INCLUDE` in files which are being included. - -**Usage** -```bash -# includes/install_nodejs -RUN apt-get install nodejs -``` - -```bash -# Rockerfile -FROM debian:jessie -INCLUDE includes/install_nodejs -ADD . /src -WORKDIR /src -CMD ["node", "app.js"] -``` - # ATTACH ```bash ATTACH @@ -406,7 +366,7 @@ FROM phusion/passenger-ruby22 WORKDIR /src MOUNT /var/lib/gems -MOUNT $GIT_SSH_KEY:/root/.ssh/id_rsa +MOUNT {{ .Env.GIT_SSH_KEY }}:/root/.ssh/id_rsa MOUNT .:/src RUN ["bundle", "install"] @@ -445,7 +405,7 @@ gb build or build for all platforms: ```bash -make +make all ``` If you have a github access token, you can also do a github release: @@ -477,18 +437,6 @@ gb test rocker/... -run TestMyFunction # TODO -- [x] Correctly handle streaming TTY from Docker, so we can show fancy progress bars -- [x] rocker build --attach? possibly allow to attach to a running container within build, so can run interactively; may be useful for dev images -- [ ] run own tar stream so there is no need to put a generated dockerfile into a working directory -- [ ] write reamde about rocker cli -- [ ] colorful output for terminals -- [ ] Should the same mounts be reused between different FROMs? 
- [ ] rocker inspect; inspecting a Rockerfile - whilch mount/export containers are there
-- [ ] SQUASH as discussed [here](https://github.com/docker/docker/issues/332)
-- [ ] do not store properties in an image
-- [ ] Read Rockerfile from stdin
-- [ ] Make more TODOs here
-
 ```bash
 grep -R TODO **/*.go | grep -v '^vendor/'
 ```
diff --git a/VERSION b/VERSION
index 71790396..3eefcb9d 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-0.2.3
+1.0.0
diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go
index 18c0a4e5..0d042553 100644
--- a/src/cmd/rocker/main.go
+++ b/src/cmd/rocker/main.go
@@ -17,22 +17,24 @@ package main
 
 import (
-	"encoding/json"
 	"fmt"
-	"log"
 	"os"
-	"path"
 	"path/filepath"
 	"strings"
 
 	"rocker/build"
+	"rocker/debugtrap"
 	"rocker/dockerclient"
-	"rocker/git"
-	"rocker/imagename"
 	"rocker/template"
+	"rocker/textformatter"
+	"rocker/util"
 
 	"github.com/codegangsta/cli"
+	"github.com/docker/docker/pkg/units"
+	"github.com/fatih/color"
 	"github.com/fsouza/go-dockerclient"
+
+	log "github.com/Sirupsen/logrus"
 )
 
 var (
@@ -49,6 +51,12 @@ var (
 	BuildTime = "none"
 )
 
+func init() {
+	log.SetOutput(os.Stdout)
+	log.SetLevel(log.InfoLevel)
+	debugtrap.SetupDumpStackTrap()
+}
+
 func main() {
 	app := cli.NewApp()
 
@@ -66,8 +74,16 @@ func main() {
 
 	app.Flags = append([]cli.Flag{
 		cli.BoolFlag{
-			Name:  "verbose",
-			Usage: "enables verbose output",
+			Name: "verbose, vv, D",
+		},
+		cli.BoolFlag{
+			Name: "json",
+		},
+		cli.BoolTFlag{
+			Name: "colors",
+		},
+		cli.BoolFlag{
+			Name: "cmd, C",
 		},
 	}, dockerclient.GlobalCliParams()...)
 
@@ -87,10 +103,24 @@ func main() {
 			Value: &cli.StringSlice{},
 			Usage: "set variables to pass to build tasks, value is like \"key=value\"",
 		},
+		cli.StringSliceFlag{
+			Name:  "vars",
+			Value: &cli.StringSlice{},
+			Usage: "Load variables from a file, either JSON or YAML. Can be passed multiple times.",
+		},
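+		// Editorial note, illustrative only: a file passed via --vars could
+		// look like {"Version": "1.2.3"} (JSON) or `Version: 1.2.3` (YAML).
+		// Values given with --var are merged over file-provided values in
+		// buildCommand (vars.Merge(cliVars)), so the command line wins.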
 		cli.BoolFlag{
 			Name:  "no-cache",
 			Usage: "supresses cache for docker builds",
 		},
+		cli.BoolFlag{
+			Name:  "reload-cache",
+			Usage: "removes any cache entry that was hit and saves the new one",
+		},
+		cli.StringFlag{
+			Name:  "cache-dir",
+			Value: "~/.rocker_cache",
+			Usage: "Set the directory where the cache will be stored",
+		},
 		cli.BoolFlag{
 			Name:  "no-reuse",
 			Usage: "suppresses reuse for all the volumes in the build",
@@ -115,6 +145,10 @@
 			Name:  "print",
 			Usage: "just print the Rockerfile after template processing and stop",
 		},
+		cli.BoolFlag{
+			Name:  "demand-artifacts",
+			Usage: "fail if artifacts are not found for {{ image }} helpers",
+		},
 		cli.StringFlag{
 			Name:  "id",
 			Usage: "override the default id generation strategy for current build",
@@ -123,6 +157,10 @@
 			Name:  "artifacts-path",
 			Usage: "put artifacts (files with pushed images description) to the directory",
 		},
+		cli.BoolFlag{
+			Name:  "no-garbage",
+			Usage: "remove untagged images from the tail of the build",
+		},
 	}
 
 	app.Commands = []cli.Command{
@@ -131,22 +169,7 @@
 			Name:   "build",
 			Usage:  "launches a build for the specified Rockerfile",
 			Action: buildCommand,
 			Flags:  buildFlags,
-		},
-		{
-			Name:   "show",
-			Usage:  "shows information about any image",
-			Action: showCommand,
-			Flags: []cli.Flag{
-				cli.BoolFlag{
-					Name:  "json",
-					Usage: "print output in json",
-				},
-			},
-		},
-		{
-			Name:   "clean",
-			Usage:  "complete a task on the list",
-			Action: cleanCommand,
+			Before: globalBefore,
 		},
 		dockerclient.InfoCommandSpec(),
 	}
@@ -162,64 +185,105 @@
 	}
 }
 
+func globalBefore(c *cli.Context) error {
+	if c.GlobalBool("cmd") {
+		log.Infof("Cmd: %s", strings.Join(os.Args, " "))
+	}
+	return nil
+}
+
 func buildCommand(c *cli.Context) {
-	configFilename := c.String("file")
-	wd, err := os.Getwd()
+	var (
+		rockerfile *build.Rockerfile
+		err        error
+	)
+
+	initLogs(c)
+
+	// We don't want info level for 'print' mode,
+	// so log only errors unless 'debug' is on
+	if c.Bool("print") && log.StandardLogger().Level != log.DebugLevel {
+		log.StandardLogger().Level = log.ErrorLevel
+	}
+
+	vars, err := template.VarsFromFileMulti(c.StringSlice("vars"))
 	if err != nil {
 		log.Fatal(err)
 	}
 
-	if !filepath.IsAbs(configFilename) {
-		configFilename = filepath.Clean(path.Join(wd, configFilename))
+	cliVars, err := template.VarsFromStrings(c.StringSlice("var"))
+	if err != nil {
+		log.Fatal(err)
 	}
 
-	// we do not want to outpu anything if "print" was asked
-	// TODO: find a more clean way to suppress output
-	if !c.Bool("print") {
-		fmt.Printf("[Rocker] Building...\n")
+	vars = vars.Merge(cliVars)
+
+	if c.Bool("demand-artifacts") {
+		vars["DemandArtifacts"] = true
 	}
 
-	dockerClient, err := dockerclient.NewFromCli(c)
+	wd, err := os.Getwd()
 	if err != nil {
 		log.Fatal(err)
 	}
 
-	// Initialize context dir
+	configFilename := c.String("file")
+	contextDir := wd
+
+	if configFilename == "-" {
+
+		rockerfile, err = build.NewRockerfile(filepath.Base(wd), os.Stdin, vars, template.Funs{})
+		if err != nil {
+			log.Fatal(err)
+		}
+
+	} else {
+
+		if !filepath.IsAbs(configFilename) {
+			configFilename = filepath.Join(wd, configFilename)
+		}
+
+		rockerfile, err = build.NewRockerfileFromFile(configFilename, vars, template.Funs{})
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		// Initialize context dir
+		contextDir = filepath.Dir(configFilename)
+	}
+
 	args := c.Args()
-	contextDir := filepath.Dir(configFilename)
 	if len(args) > 0 {
-		if filepath.IsAbs(args[0]) {
-			contextDir = args[0]
-		} else {
-			contextDir = filepath.Clean(path.Join(wd, 
args[0])) + contextDir = args[0] + if !filepath.IsAbs(contextDir) { + contextDir = filepath.Join(wd, args[0]) } } - cliVars, err := template.VarsFromStrings(c.StringSlice("var")) - if err != nil { - log.Fatal(err) + log.Debugf("Context directory: %s", contextDir) + + if c.Bool("print") { + fmt.Print(rockerfile.Content) + os.Exit(0) } - vars := template.Vars{}.Merge(cliVars) + dockerignore := []string{} - // obtain git info about current directory - gitInfo, err := git.Info(filepath.Dir(configFilename)) - if err != nil { - // Ignore if given directory is not a git repo - if _, ok := err.(*git.ErrNotGitRepo); !ok { + dockerignoreFilename := filepath.Join(contextDir, ".dockerignore") + if _, err := os.Stat(dockerignoreFilename); err == nil { + if dockerignore, err = build.ReadDockerignoreFile(dockerignoreFilename); err != nil { log.Fatal(err) } } - // some additional useful vars - vars["commit"] = stringOr(os.Getenv("GIT_COMMIT"), gitInfo.Sha) - vars["branch"] = stringOr(os.Getenv("GIT_BRANCH"), gitInfo.Branch) - vars["git_url"] = stringOr(os.Getenv("GIT_URL"), gitInfo.URL) - vars["commit_message"] = gitInfo.Message - vars["commit_author"] = gitInfo.Author + dockerClient, err := dockerclient.NewFromCli(c) + if err != nil { + log.Fatal(err) + } - auth := &docker.AuthConfiguration{} + auth := docker.AuthConfiguration{} authParam := c.String("auth") if strings.Contains(authParam, ":") { userPass := strings.Split(authParam, ":") @@ -227,126 +291,82 @@ func buildCommand(c *cli.Context) { auth.Password = userPass[1] } - builder := build.Builder{ - Rockerfile: configFilename, - ContextDir: contextDir, - UtilizeCache: !c.Bool("no-cache"), - Push: c.Bool("push"), - NoReuse: c.Bool("no-reuse"), - Verbose: c.Bool("verbose"), - Attach: c.Bool("attach"), - Print: c.Bool("print"), - Auth: auth, - Vars: vars, - CliVars: cliVars, + client := build.NewDockerClient(dockerClient, auth) + + var cache build.Cache + if !c.Bool("no-cache") { + cacheDir, err := util.MakeAbsolute(c.String("cache-dir")) + if err != nil { + log.Fatal(err) + } + cache = build.NewCacheFS(cacheDir) + } + + builder := build.New(client, rockerfile, cache, build.Config{ InStream: os.Stdin, OutStream: os.Stdout, - Docker: dockerClient, - AddMeta: c.Bool("meta"), + ContextDir: contextDir, + Dockerignore: dockerignore, + ArtifactsPath: c.String("artifacts-path"), Pull: c.Bool("pull"), + NoGarbage: c.Bool("no-garbage"), + Attach: c.Bool("attach"), + Verbose: c.GlobalBool("verbose"), ID: c.String("id"), - ArtifactsPath: c.String("artifacts-path"), - } + NoCache: c.Bool("no-cache"), + ReloadCache: c.Bool("reload-cache"), + Push: c.Bool("push"), + }) - if _, err := builder.Build(); err != nil { + plan, err := build.NewPlan(rockerfile.Commands(), true) + if err != nil { log.Fatal(err) } -} -func showCommand(c *cli.Context) { - dockerClient, err := dockerclient.NewFromCli(c) - if err != nil { + // Check the docker connection before we actually run + if err := dockerclient.Ping(dockerClient, 5000); err != nil { log.Fatal(err) } - // Initialize context dir - args := c.Args() - if len(args) == 0 { - log.Fatal("Missing image argument") + if err := builder.Run(plan); err != nil { + log.Fatal(err) } - //parse parameter to name - imageName := imagename.NewFromString(args[0]) - infos := []*build.RockerImageData{} - - if imageName.IsStrict() { - image, err := dockerClient.InspectImage(args[0]) - if err != nil && err.Error() == "no such image" { - image, err = imagename.RegistryGet(imageName) - if err != nil { - log.Fatal(err) - } - } else if err != nil { - 
log.Fatal(err) - } - info, err := toInfo(imageName, image) - if err != nil { - log.Fatal(err) - } - infos = append(infos, info) - } else { - images, err := imagename.RegistryListTags(imageName) - if err != nil { - log.Fatal(err) - } - type resp struct { - name *imagename.ImageName - image *docker.Image - err error - } - chResp := make(chan resp, len(images)) - - for _, img := range images { - go func(img *imagename.ImageName) { - r := resp{name: img} - r.image, r.err = imagename.RegistryGet(img) - chResp <- r - }(img) - } + size := fmt.Sprintf("final size %s (+%s from the base image)", + units.HumanSize(float64(builder.VirtualSize)), + units.HumanSize(float64(builder.ProducedSize)), + ) - for _ = range images { - r := <-chResp - if r.err != nil { - log.Println(r.err) - } else if info, err := toInfo(r.name, r.image); err == nil { - infos = append(infos, info) - } - } - } + log.Infof("Successfully built %.12s | %s", builder.GetImageID(), size) +} - if c.Bool("json") { - res, err := json.Marshal(infos) - if err != nil { - log.Fatal(err) - } - fmt.Println(string(res)) - } else { - for _, res := range infos { - fmt.Println(res.PrettyString()) - } +func initLogs(ctx *cli.Context) { + logger := log.StandardLogger() + + if ctx.GlobalBool("verbose") { + logger.Level = log.DebugLevel } -} -func toInfo(name *imagename.ImageName, image *docker.Image) (*build.RockerImageData, error) { - data := &build.RockerImageData{} + var ( + isTerm = log.IsTerminal() + json = ctx.GlobalBool("json") + useColors = isTerm && !json + ) - if image.Config != nil { - if _, ok := image.Config.Labels["rocker-data"]; ok { - if err := json.Unmarshal([]byte(image.Config.Labels["rocker-data"]), data); err != nil { - return nil, err - } - } - data.Created = image.Created + if ctx.GlobalIsSet("colors") { + useColors = ctx.GlobalBool("colors") } - data.ImageName = name - return data, nil -} + color.NoColor = !useColors -func cleanCommand(c *cli.Context) { - verbose := c.Bool("verbose") - fmt.Println("verbose") - fmt.Println(verbose) + if json { + logger.Formatter = &log.JSONFormatter{} + } else { + formatter := &textformatter.TextFormatter{} + formatter.DisableColors = !useColors + + logger.Formatter = formatter + } } func stringOr(args ...string) string { diff --git a/src/rocker/build/build.go b/src/rocker/build/build.go new file mode 100644 index 00000000..e38c67e8 --- /dev/null +++ b/src/rocker/build/build.go @@ -0,0 +1,346 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package build
+
+import (
+	"fmt"
+	"io"
+	"rocker/imagename"
+
+	"github.com/docker/docker/pkg/units"
+	"github.com/fatih/color"
+
+	"github.com/fsouza/go-dockerclient"
+	"github.com/kr/pretty"
+
+	log "github.com/Sirupsen/logrus"
+)
+
+var (
+	// NoBaseImageSpecifier defines the empty image name, used in the FROM instruction
+	NoBaseImageSpecifier = "scratch"
+
+	// MountVolumeImage used for MOUNT volume containers
+	MountVolumeImage = "grammarly/scratch:latest"
+
+	// RsyncImage used for EXPORT volume containers
+	RsyncImage = "grammarly/rsync-static:1"
+
+	// ExportsPath is the path within EXPORT volume containers
+	ExportsPath = "/.rocker_exports"
+)
+
+// Config is used to specify parameters for the builder in New()
+type Config struct {
+	OutStream     io.Writer
+	InStream      io.ReadCloser
+	ContextDir    string
+	ID            string
+	Dockerignore  []string
+	ArtifactsPath string
+	Pull          bool
+	NoGarbage     bool
+	Attach        bool
+	Verbose       bool
+	NoCache       bool
+	ReloadCache   bool
+	Push          bool
+}
+
+// Build is the main object that processes the build
+type Build struct {
+	ProducedSize int64
+	VirtualSize  int64
+
+	rockerfile *Rockerfile
+	cache      Cache
+	cfg        Config
+	client     Client
+	state      State
+
+	// A little hack to support cross-FROM cache for EXPORTS
+	// maybe rethink it later
+	exports []string
+}
+
+// New creates a new build object
+func New(client Client, rockerfile *Rockerfile, cache Cache, cfg Config) *Build {
+	b := &Build{
+		rockerfile: rockerfile,
+		cache:      cache,
+		cfg:        cfg,
+		client:     client,
+		exports:    []string{},
+	}
+	b.state = NewState(b)
+	return b
+}
+
+// Run runs the build following the given Plan
+func (b *Build) Run(plan Plan) (err error) {
+
+	for k := 0; k < len(plan); k++ {
+		c := plan[k]
+
+		log.Debugf("Step %d: %# v", k+1, pretty.Formatter(c))
+
+		var doRun bool
+		if doRun, err = c.ShouldRun(b); err != nil {
+			return err
+		}
+		if !doRun {
+			continue
+		}
+
+		// Replace env for the command if appropriate
+		if c, ok := c.(EnvReplacableCommand); ok {
+			c.ReplaceEnv(b.state.Config.Env)
+		}
+
+		log.Infof("%s", color.New(color.FgWhite, color.Bold).SprintFunc()(c))
+
+		if b.state, err = c.Execute(b); err != nil {
+			return err
+		}
+
+		log.Debugf("State after step %d: %# v", k+1, pretty.Formatter(b.state))
+
+		// Here we need to inject ONBUILD commands on the fly,
+		// build a sub plan and merge it with the main plan.
+		// Not very beautiful, because Run takes the Plan as an argument
+		// and then builds its own, but it works for now.
+		if len(b.state.InjectCommands) > 0 {
+			commands, err := parseOnbuildCommands(b.state.InjectCommands)
+			if err != nil {
+				return err
+			}
+			subPlan, err := NewPlan(commands, false)
+			if err != nil {
+				return err
+			}
+			tail := append(subPlan, plan[k+1:]...)
+			plan = append(plan[:k+1], tail...)
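+			// Editorial note: this splice is safe even though both appends may
+			// touch plan's backing array. The first append copies the pending
+			// tail (plan[k+1:]) into `tail` before the second append overwrites
+			// anything past k+1, so no remaining commands are lost.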
+
+			b.state.InjectCommands = []string{}
+		}
+	}
+
+	return nil
+}
+
+// GetState returns the current build state object
+func (b *Build) GetState() State {
+	return b.state
+}
+
+// GetImageID returns the last image ID produced by the build
+func (b *Build) GetImageID() string {
+	return b.state.ImageID
+}
+
+func (b *Build) probeCache(s State) (cachedState State, hit bool, err error) {
+	if b.cache == nil || s.NoCache.CacheBusted {
+		return s, false, nil
+	}
+
+	var s2 *State
+	if s2, err = b.cache.Get(s); err != nil {
+		return s, false, err
+	}
+	if s2 == nil {
+		s.NoCache.CacheBusted = true
+		log.Info(color.New(color.FgYellow).SprintFunc()("| Not cached"))
+		return s, false, nil
+	}
+
+	if b.cfg.ReloadCache {
+		defer b.cache.Del(*s2)
+		s.NoCache.CacheBusted = true
+		log.Info(color.New(color.FgYellow).SprintFunc()("| Reload cache"))
+		return s, false, nil
+	}
+
+	var img *docker.Image
+	if img, err = b.client.InspectImage(s2.ImageID); err != nil {
+		return s, true, err
+	}
+	if img == nil {
+		defer b.cache.Del(*s2)
+		s.NoCache.CacheBusted = true
+		log.Info(color.New(color.FgYellow).SprintFunc()("| Not cached"))
+		return s, false, nil
+	}
+
+	size := fmt.Sprintf("%s (+%s)",
+		units.HumanSize(float64(img.VirtualSize)),
+		units.HumanSize(float64(img.Size)),
+	)
+
+	log.WithFields(log.Fields{
+		"size": size,
+	}).Infof(color.New(color.FgGreen).SprintfFunc()("| Cached! Take image %.12s", s2.ImageID))
+
+	// Store some stuff to the build
+	b.ProducedSize += img.Size
+	b.VirtualSize = img.VirtualSize
+
+	// Keep items that should not be cached from the previous state
+	s2.NoCache = s.NoCache
+	// We don't want commits to go through the cache
+	s2.CleanCommits()
+
+	return *s2, true, nil
+}
+
+func (b *Build) getVolumeContainer(path string) (name string, err error) {
+
+	name = b.mountsContainerName(path)
+
+	config := &docker.Config{
+		Image: MountVolumeImage,
+		Volumes: map[string]struct{}{
+			path: struct{}{},
+		},
+	}
+
+	log.Debugf("Make MOUNT volume container %s with options %# v", name, config)
+
+	if _, err = b.client.EnsureContainer(name, config, path); err != nil {
+		return name, err
+	}
+
+	log.Infof("| Using container %s for %s", name, path)
+
+	return name, nil
+}
+
+func (b *Build) getExportsContainer() (name string, err error) {
+	name = b.exportsContainerName()
+
+	config := &docker.Config{
+		Image: RsyncImage,
+		Volumes: map[string]struct{}{
+			"/opt/rsync/bin": struct{}{},
+			ExportsPath:      struct{}{},
+		},
+	}
+
+	log.Debugf("Make EXPORT container %s with options %# v", name, config)
+
+	containerID, err := b.client.EnsureContainer(name, config, "exports")
+	if err != nil {
+		return "", err
+	}
+
+	log.Infof("| Using exports container %s", name)
+
+	return containerID, nil
+}
+
+// lookupImage looks up the image by name and returns a *docker.Image object (the result of the inspect)
+// The `Pull` config option defines whether we want to update the latest version of the image from the remote registry
+// See the build.Config struct for more details about other build config options.
+//
+// If `Pull` is false, it tries to look the image up locally by exact matching, e.g. if the image is already
+// pulled with that exact name given (no fuzzy semver matching)
+//
+// Then the function fetches the list of all pulled images and tries to match one of them by the given name.
+//
+// If `Pull` is set to true, or if it cannot find the image locally, it then fetches all image
+// tags from the remote registry and finds the best match for the given image name.
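+//
+// For example (editorial illustration): with `Pull` false and local images
+// [debian:7.7, ubuntu:14.04, ubuntu:latest], looking up "ubuntu:latest" is
+// resolved from the local list and the remote registry is never contacted;
+// with `Pull` true, the remote tag list is consulted first.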
+//
+// If it cannot find the image either locally or in the remote registry, it returns `nil`.
+//
+// In case the given image has a sha256 tag, it looks for it locally and pulls it if it's not found.
+// No semver matching is done for sha256-tagged images.
+//
+// See also the TestBuild_LookupImage_* test cases in build_test.go
+func (b *Build) lookupImage(name string) (img *docker.Image, err error) {
+	var (
+		candidate, remoteCandidate *imagename.ImageName
+
+		imgName = imagename.NewFromString(name)
+		pull    = false
+		hub     = b.cfg.Pull
+		isSha   = imgName.TagIsSha()
+	)
+
+	// If hub is true, then it makes no sense to inspect the local image
+	if !hub || isSha {
+		// Try to inspect image as is, without version resolution
+		if img, err := b.client.InspectImage(name); err != nil || img != nil {
+			return img, err
+		}
+	}
+
+	if isSha {
+		// If we are still here and the image was not found locally, we want to pull it
+		candidate = imgName
+		hub = false
+		pull = true
+	}
+
+	if !isSha && !hub {
+		// List local images
+		var localImages = []*imagename.ImageName{}
+		if localImages, err = b.client.ListImages(); err != nil {
+			return nil, err
+		}
+		// Resolve local candidate
+		candidate = imgName.ResolveVersion(localImages)
+	}
+
+	// In case we want to include external images as well, we pull the list of
+	// available images from the remote registry
+	if hub || candidate == nil {
+		log.Debugf("Getting list of tags for %s from the registry", imgName)
+
+		var remoteImages []*imagename.ImageName
+
+		if remoteImages, err = b.client.ListImageTags(imgName.String()); err != nil {
+			err = fmt.Errorf("Failed to list tags of image %s from the remote registry, error: %s", imgName, err)
+		}
+
+		// Since we found the remote image, we want to pull it
+		if remoteCandidate = imgName.ResolveVersion(remoteImages); remoteCandidate != nil {
+			pull = true
+			candidate = remoteCandidate
+		}
+	}
+
+	// If no candidate was found, it's an error
+	if candidate == nil {
+		err = fmt.Errorf("Image not found: %s (also checked in the remote registry)", imgName)
+		return
+	}
+
+	if !isSha && imgName.GetTag() != candidate.GetTag() {
+		if remoteCandidate != nil {
+			log.Infof("Resolve %s --> %s (found remotely)", imgName, candidate.GetTag())
+		} else {
+			log.Infof("Resolve %s --> %s", imgName, candidate.GetTag())
+		}
+	}
+
+	if pull {
+		if err = b.client.PullImage(candidate.String()); err != nil {
+			return
+		}
+	}
+
+	return b.client.InspectImage(candidate.String())
+}
diff --git a/src/rocker/build/build_test.go b/src/rocker/build/build_test.go
new file mode 100644
index 00000000..f68b972a
--- /dev/null
+++ b/src/rocker/build/build_test.go
@@ -0,0 +1,355 @@
+/*-
+ * Copyright 2015 Grammarly, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. 
+ */ + +package build + +import ( + "io" + "rocker/imagename" + "rocker/template" + "runtime" + "strings" + "testing" + + "github.com/fsouza/go-dockerclient" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestBuild_NewBuild(t *testing.T) { + b, _ := makeBuild(t, "FROM ubuntu", Config{}) + assert.IsType(t, &Rockerfile{}, b.rockerfile) +} + +func TestBuild_ReplaceEnvVars(t *testing.T) { + rockerfile := "FROM ubuntu\nENV PATH=$PATH:/cassandra/bin" + b, c := makeBuild(t, rockerfile, Config{}) + plan := makePlan(t, rockerfile) + + img := &docker.Image{ + ID: "123", + Config: &docker.Config{ + Env: []string{"PATH=/usr/bin"}, + }, + } + + resultImage := &docker.Image{ID: "789"} + + c.On("InspectImage", "ubuntu").Return(img, nil).Once() + + c.On("CreateContainer", mock.AnythingOfType("State")).Return("456", nil).Run(func(args mock.Arguments) { + arg := args.Get(0).(State) + assert.Equal(t, []string{"PATH=/usr/bin:/cassandra/bin"}, arg.Config.Env) + }).Once() + + c.On("CommitContainer", mock.AnythingOfType("State"), "ENV PATH=/usr/bin:/cassandra/bin").Return(resultImage, nil).Once() + + c.On("RemoveContainer", "456").Return(nil).Once() + + if err := b.Run(plan); err != nil { + t.Fatal(err) + } +} + +func TestBuild_LookupImage_ExactExistLocally(t *testing.T) { + var ( + b, c = makeBuild(t, "", Config{}) + resultImage = &docker.Image{ID: "789"} + name = "ubuntu:latest" + ) + + c.On("InspectImage", name).Return(resultImage, nil).Once() + + result, err := b.lookupImage(name) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, resultImage, result) + c.AssertExpectations(t) +} + +func TestBuild_LookupImage_ExistLocally(t *testing.T) { + var ( + nilImage *docker.Image + + b, c = makeBuild(t, "", Config{}) + resultImage = &docker.Image{ID: "789"} + name = "ubuntu:latest" + + localImages = []*imagename.ImageName{ + imagename.NewFromString("debian:7.7"), + imagename.NewFromString("debian:latest"), + imagename.NewFromString("ubuntu:12.04"), + imagename.NewFromString("ubuntu:14.04"), + imagename.NewFromString("ubuntu:latest"), + } + ) + + c.On("InspectImage", name).Return(nilImage, nil).Once() + c.On("ListImages").Return(localImages, nil).Once() + c.On("InspectImage", name).Return(resultImage, nil).Once() + + result, err := b.lookupImage(name) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, resultImage, result) + c.AssertExpectations(t) +} + +func TestBuild_LookupImage_NotExistLocally(t *testing.T) { + var ( + nilImage *docker.Image + + b, c = makeBuild(t, "", Config{}) + resultImage = &docker.Image{ID: "789"} + name = "ubuntu:latest" + + localImages = []*imagename.ImageName{} + + remoteImages = []*imagename.ImageName{ + imagename.NewFromString("debian:7.7"), + imagename.NewFromString("debian:latest"), + imagename.NewFromString("ubuntu:12.04"), + imagename.NewFromString("ubuntu:14.04"), + imagename.NewFromString("ubuntu:latest"), + } + ) + + c.On("InspectImage", name).Return(nilImage, nil).Once() + c.On("ListImages").Return(localImages, nil).Once() + c.On("ListImageTags", name).Return(remoteImages, nil).Once() + c.On("PullImage", name).Return(nil).Once() + c.On("InspectImage", name).Return(resultImage, nil).Once() + + result, err := b.lookupImage(name) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, resultImage, result) + c.AssertExpectations(t) +} + +func TestBuild_LookupImage_PullAndExist(t *testing.T) { + var ( + b, c = makeBuild(t, "", Config{Pull: true}) + resultImage = &docker.Image{ID: "789"} + name = "ubuntu:latest" + + remoteImages = 
[]*imagename.ImageName{ + imagename.NewFromString("debian:7.7"), + imagename.NewFromString("debian:latest"), + imagename.NewFromString("ubuntu:12.04"), + imagename.NewFromString("ubuntu:14.04"), + imagename.NewFromString("ubuntu:latest"), + } + ) + + c.On("ListImageTags", name).Return(remoteImages, nil).Once() + c.On("PullImage", name).Return(nil).Once() + c.On("InspectImage", name).Return(resultImage, nil).Once() + + result, err := b.lookupImage(name) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, resultImage, result) + c.AssertExpectations(t) +} + +func TestBuild_LookupImage_PullAndNotExist(t *testing.T) { + var ( + b, c = makeBuild(t, "", Config{Pull: true}) + name = "ubuntu:latest" + + remoteImages = []*imagename.ImageName{ + imagename.NewFromString("debian:7.7"), + imagename.NewFromString("debian:latest"), + imagename.NewFromString("ubuntu:12.04"), + imagename.NewFromString("ubuntu:14.04"), + } + ) + + c.On("ListImageTags", name).Return(remoteImages, nil).Once() + + _, err := b.lookupImage(name) + assert.EqualError(t, err, "Image not found: ubuntu:latest (also checked in the remote registry)") + c.AssertExpectations(t) +} + +func TestBuild_LookupImage_ShaExistLocally(t *testing.T) { + for _, pull := range []bool{true, false} { + t.Logf("Testing with pull=%t", pull) + + var ( + b, c = makeBuild(t, "", Config{Pull: pull}) + resultImage = &docker.Image{ID: "789"} + name = "ubuntu@sha256:afafa" + ) + + c.On("InspectImage", name).Return(resultImage, nil).Once() + + result, err := b.lookupImage(name) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, resultImage, result) + c.AssertExpectations(t) + } +} + +func TestBuild_LookupImage_ShaNotExistLocally(t *testing.T) { + for _, pull := range []bool{true, false} { + t.Logf("Testing with pull=%t", pull) + + var ( + nilImage *docker.Image + + b, c = makeBuild(t, "", Config{Pull: pull}) + resultImage = &docker.Image{ID: "789"} + name = "ubuntu@sha256:afafa" + ) + + c.On("InspectImage", name).Return(nilImage, nil).Once() + c.On("PullImage", name).Return(nil).Once() + c.On("InspectImage", name).Return(resultImage, nil).Once() + + result, err := b.lookupImage(name) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, resultImage, result) + c.AssertExpectations(t) + } +} + +// internal helpers + +func makeBuild(t *testing.T, rockerfileContent string, cfg Config) (*Build, *MockClient) { + pc, _, _, _ := runtime.Caller(1) + fn := runtime.FuncForPC(pc) + + r, err := NewRockerfile(fn.Name(), strings.NewReader(rockerfileContent), template.Vars{}, template.Funs{}) + if err != nil { + t.Fatal(err) + } + + cfg.NoCache = true + + c := &MockClient{} + b := New(c, r, nil, cfg) + + return b, c +} + +// Docker client mock + +type MockClient struct { + mock.Mock +} + +func (m *MockClient) InspectImage(name string) (*docker.Image, error) { + args := m.Called(name) + return args.Get(0).(*docker.Image), args.Error(1) +} + +func (m *MockClient) PullImage(name string) error { + args := m.Called(name) + return args.Error(0) +} + +func (m *MockClient) ListImages() (images []*imagename.ImageName, err error) { + args := m.Called() + return args.Get(0).([]*imagename.ImageName), args.Error(1) +} + +func (m *MockClient) ListImageTags(name string) (images []*imagename.ImageName, err error) { + args := m.Called(name) + return args.Get(0).([]*imagename.ImageName), args.Error(1) +} + +func (m *MockClient) RemoveImage(imageID string) error { + args := m.Called(imageID) + return args.Error(0) +} + +func (m *MockClient) TagImage(imageID, imageName string) error { 
+ args := m.Called(imageID, imageName) + return args.Error(0) +} + +func (m *MockClient) PushImage(imageName string) (string, error) { + args := m.Called(imageName) + return args.String(0), args.Error(1) +} + +func (m *MockClient) CreateContainer(state State) (string, error) { + args := m.Called(state) + return args.String(0), args.Error(1) +} + +func (m *MockClient) RunContainer(containerID string, attach bool) error { + args := m.Called(containerID, attach) + return args.Error(0) +} + +func (m *MockClient) CommitContainer(state State, message string) (*docker.Image, error) { + args := m.Called(state, message) + return args.Get(0).(*docker.Image), args.Error(1) +} + +func (m *MockClient) RemoveContainer(containerID string) error { + args := m.Called(containerID) + return args.Error(0) +} + +func (m *MockClient) UploadToContainer(containerID string, stream io.Reader, path string) error { + args := m.Called(containerID, stream, path) + return args.Error(0) +} + +func (m *MockClient) ResolveHostPath(path string) (resultPath string, err error) { + args := m.Called(path) + return args.String(0), args.Error(1) +} + +func (m *MockClient) EnsureImage(imageName string) error { + args := m.Called(imageName) + return args.Error(0) +} + +func (m *MockClient) EnsureContainer(containerName string, config *docker.Config, purpose string) (containerID string, err error) { + args := m.Called(containerName, config, purpose) + return args.String(0), args.Error(1) +} + +// type MockCache struct { +// mock.Mock +// } + +// func (m *MockCache) Get(s State) (s2 *State, err error) { + +// } + +// func (m *MockCache) Put(s State) error { + +// } diff --git a/src/rocker/build/builder.go b/src/rocker/build/builder.go deleted file mode 100644 index 1c20633e..00000000 --- a/src/rocker/build/builder.go +++ /dev/null @@ -1,394 +0,0 @@ -/*- - * Copyright 2015 Grammarly, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package build does build of given Rockerfile -package build - -import ( - "crypto/md5" - "fmt" - "io" - "os" - "path" - "path/filepath" - "strings" - - "rocker/imagename" - "rocker/parser" - "rocker/template" - - "github.com/docker/docker/pkg/term" - "github.com/fsouza/go-dockerclient" -) - -const ( - // busybox used for cache data volume containers - busyboxImage = "busybox:buildroot-2013.08.1" - rsyncImage = "grammarly/rsync-static:1" - - exportsVolume = "/.rocker_exports" -) - -var ( - // PassEnvVars is the list of ENV variables to pass to a Rockerfile - PassEnvVars = []string{"GIT_SSH_KEY"} -) - -// Builder is the main builder object. It holds configuration options and -// intermedate state while looping through a build commands. 
-type Builder struct { - Rockerfile string - RockerfileContent string - ContextDir string - ID string - OutStream io.Writer - InStream io.ReadCloser - Docker *docker.Client - Config *docker.Config - Auth *docker.AuthConfiguration - UtilizeCache bool - Push bool - NoReuse bool - Verbose bool - Attach bool - Vars template.Vars - CliVars template.Vars - AddMeta bool - Print bool - Pull bool - ArtifactsPath string - - rootNode *parser.Node - i int - imageID string - mounts []builderMount - allMounts []builderMount - dockerfile *parser.Node - cacheBusted bool - exportDirs []string - intermediateImages []string - exportsContainerID string - lastExportImageID string - gitIgnored bool - isTerminalIn bool - isTerminalOut bool - fdIn uintptr - fdOut uintptr - metaAdded bool - recentTags []*imagename.ImageName - imagesCache []docker.APIImages -} - -type builderMount struct { - cache bool - origSrc string - src string - dest string - containerID string -} - -func (mount builderMount) String() string { - if mount.src != "" { - return mount.src + ":" + mount.dest - } - return mount.dest + ":" + mount.containerID -} - -// Build runs the build of given Rockerfile and returns image id -func (builder *Builder) Build() (imageID string, err error) { - // Do initial cleanup, you know, just to be sure - // Previous builds could be ended up abnormally - if err := builder.cleanup(); err != nil { - return "", err - } - - // Initialize auth configuration - if builder.Auth == nil { - builder.Auth = &docker.AuthConfiguration{} - } - - // Initialize in/out file descriptors - if builder.InStream != nil { - fd, isTerminal := term.GetFdInfo(builder.InStream) - builder.fdIn = fd - builder.isTerminalIn = isTerminal - } - if builder.OutStream != nil { - fd, isTerminal := term.GetFdInfo(builder.OutStream) - builder.fdOut = fd - builder.isTerminalOut = isTerminal - } - - // Wrap this into function to have deferred functions run before - // we do final checks - run := func() (err error) { - fd, err := os.Open(builder.Rockerfile) - if err != nil { - return fmt.Errorf("Failed to open file %s, error: %s", builder.Rockerfile, err) - } - defer fd.Close() - - data, err := template.Process(builder.Rockerfile, fd, builder.Vars.ToMapOfInterface(), map[string]interface{}{}) - if err != nil { - return err - } - builder.RockerfileContent = data.String() - - if builder.Print { - fmt.Print(builder.RockerfileContent) - os.Exit(0) - } - - if builder.ContextDir == "" { - builder.ContextDir = filepath.Dir(builder.Rockerfile) - } - - if _, err := os.Stat(builder.ContextDir); err != nil { - return err - } - - if err := builder.checkDockerignore(); err != nil { - return err - } - - rootNode, err := parser.Parse(strings.NewReader(builder.RockerfileContent)) - if err != nil { - return err - } - - builder.rootNode = rootNode - builder.dockerfile = &parser.Node{} - - defer func() { - if err2 := builder.cleanup(); err2 != nil && err == nil { - err = err2 - } - }() - - for builder.i = 0; builder.i < len(builder.rootNode.Children); builder.i++ { - oldImageID := builder.imageID - - if err := builder.dispatch(builder.i, builder.rootNode.Children[builder.i]); err != nil { - return err - } - - if builder.imageID != oldImageID && builder.imageID != "" { - fmt.Fprintf(builder.OutStream, "[Rocker] ---> %.12s\n", builder.imageID) - } - } - - if err := builder.runDockerfile(); err != nil { - return err - } - - return nil - } - - if err := run(); err != nil { - return "", err - } - - if builder.imageID == "" { - return "", fmt.Errorf("No image was generated. 
Is your Rockerfile empty?") - } - - fmt.Fprintf(builder.OutStream, "[Rocker] Successfully built %.12s\n", builder.imageID) - - return builder.imageID, nil -} - -// dispatch runs a particular command -func (builder *Builder) dispatch(stepN int, node *parser.Node) (err error) { - cmd := node.Value - attrs := node.Attributes - original := node.Original - args := []string{} - flags := parseFlags(node.Flags) - - // fill in args and substitute vars - for n := node.Next; n != nil; n = n.Next { - // TODO: we also may want to collect ENV variables to use in EXPORT for example - n.Value = builder.Vars.ReplaceString(n.Value) - args = append(args, n.Value) - } - - switch cmd { - - case "mount", "run", "export", "import", "tag", "push", "require", "var", "include", "attach", "from": - // we do not have to eval RUN ourselves if we have no mounts - if cmd == "run" && len(builder.mounts) == 0 { - break - } - // also skip initial FROM command - if cmd == "from" && builder.imageID == "" { - break - } - // run dockerfile we have collected so far - // except if we have met INCLUDE - if cmd != "include" { - if err := builder.runDockerfile(); err != nil { - return err - } - } - - // do not want to report processing FROM command (unnecessary) - if cmd != "from" { - fmt.Fprintf(builder.OutStream, "[Rocker] %s %s\n", strings.ToUpper(cmd), strings.Join(args, " ")) - } - - switch cmd { - case "mount": - return builder.cmdMount(args, attrs, flags, original) - case "export": - return builder.cmdExport(args, attrs, flags, original) - case "import": - return builder.cmdImport(args, attrs, flags, original) - case "run": - return builder.cmdRun(args, attrs, flags, original) - case "tag": - return builder.cmdTag(args, attrs, flags, original) - case "push": - return builder.cmdPush(args, attrs, flags, original) - case "require": - return builder.cmdRequire(args, attrs, flags, original) - case "var": - return builder.cmdVar(args, attrs, flags, original) - case "include": - return builder.cmdInclude(args, attrs, flags, original) - case "attach": - return builder.cmdAttach(args, attrs, flags, original) - case "from": - // We don't need previous image - // TODO: check it will be not deleted if tagged - builder.intermediateImages = append(builder.intermediateImages, builder.imageID) - builder.reset() - } - - // use it for warnings if .git is not ignored - case "add", "copy": - addAll := false - if len(args) > 0 { - for _, arg := range args[:len(args)-1] { - allArg := arg == "/" || arg == "." || arg == "./" || arg == "*" || arg == "./*" - addAll = addAll || allArg - } - } - hasGitInRoot := false - if _, err := os.Stat(builder.ContextDir + "/.git"); err == nil { - hasGitInRoot = true - } - if hasGitInRoot && !builder.gitIgnored && addAll { - fmt.Fprintf(builder.OutStream, - "[Rocker] *** WARNING .git is not ignored in .dockerignore; not ignoring .git will beat caching of: %s\n", original) - } - } - - // TODO: cancel build? 
- - // collect dockerfile - builder.pushToDockerfile(node) - - return nil -} - -// reset does reset the builder state; it is used in between different FROMs -// it doest not reset completely, some properties are shared across FROMs -func (builder *Builder) reset() { - builder.mounts = []builderMount{} - builder.imageID = "" - builder.dockerfile = &parser.Node{} - builder.Config = &docker.Config{} - builder.cacheBusted = false - builder.metaAdded = false - return -} - -// pushToDockerfile collects commands that will falled back to a `docker build` -func (builder *Builder) pushToDockerfile(node *parser.Node) { - builder.dockerfile.Children = append(builder.dockerfile.Children, node) -} - -// addMount adds a mount structure to the state -func (builder *Builder) addMount(mount builderMount) { - builder.mounts = append(builder.mounts, mount) - builder.allMounts = append(builder.allMounts, mount) -} - -// removeLastMount pops mount structure from the state -func (builder *Builder) removeLastMount() { - if len(builder.mounts) == 0 { - return - } - builder.mounts = builder.mounts[0 : len(builder.mounts)-1] -} - -// rockerfileName returns basename of current Rockerfile -func (builder *Builder) rockerfileName() string { - return filepath.Base(builder.Rockerfile) -} - -// rockerfileRelativePath returns the path of the current Rockerfile relative to the context dir -// TODO: whyrockerfileRelativePath() returns the basename instead? Need to test it -func (builder *Builder) rockerfileRelativePath() string { - return filepath.Base(builder.Rockerfile) -} - -// dockerfileName generates the name of Dockerfile that will be written to a context dir -// and then thrown to a `docker build` fallback -func (builder *Builder) dockerfileName() string { - // Here we cannot puth temporary Dockerfile into tmp directory - // That's how docker ignore technique works - it does not remove the direcotry itself, sadly - dockerfileName := builder.getTmpPrefix() + "_" + builder.rockerfileName() - if builder.imageID == "" { - return dockerfileName + "_init" - } - return dockerfileName + "_" + fmt.Sprintf("%.12s", builder.imageID) -} - -// getTmpPrefix returns the prefix for all of rocker's tmp files that will be written -// to the currect directory -func (builder *Builder) getTmpPrefix() string { - return ".rockertmp" -} - -// getIdentifier returns the sequence that is unique to the current Rockerfile -func (builder *Builder) getIdentifier() string { - if builder.ID != "" { - return builder.ID - } - return builder.ContextDir + ":" + builder.Rockerfile -} - -// mountsContainerName returns the name of volume container that will be used for a particular MOUNT -func (builder *Builder) mountsContainerName(destinations []string) string { - // TODO: should mounts be reused between different FROMs ? - mountID := builder.getIdentifier() + ":" + strings.Join(destinations, ":") - return fmt.Sprintf("rocker_mount_%.6x", md5.Sum([]byte(mountID))) -} - -// exportsContainerName return the name of volume container that will be used for EXPORTs -func (builder *Builder) exportsContainerName() string { - mountID := builder.getIdentifier() - return fmt.Sprintf("rocker_exports_%.6x", md5.Sum([]byte(mountID))) -} - -// cleanup cleans all tmp files produced by the build -func (builder *Builder) cleanup() error { - // All we have to do is remove tmpDir - // This will disable us to do parallel builds, but much easier to implement! 
- os.RemoveAll(path.Join(builder.ContextDir, builder.getTmpPrefix())) - return nil -} diff --git a/src/rocker/build/builder_test.go b/src/rocker/build/builder_test.go deleted file mode 100644 index c5220e2e..00000000 --- a/src/rocker/build/builder_test.go +++ /dev/null @@ -1,1152 +0,0 @@ -/*- - * Copyright 2015 Grammarly, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package build - -// This is a suite of integration tests for rocker/build -// I have no idea of how to isolate it and run without Docker - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "testing" - "time" - - "rocker/dockerclient" - "rocker/template" - "rocker/test" - "rocker/util" - - "github.com/stretchr/testify/assert" - - "github.com/fsouza/go-dockerclient" -) - -func TestBuilderBuildBasic(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuild_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": `FROM busybox:buildroot-2013.08.1 -ENTRYPOINT ls / -RUN touch /testing`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - result, err := runContainer(t, client, &docker.Config{ - Image: imageID, - }, nil) - - t.Logf("Got result: %s", result) - - assert.Contains(t, result, "testing", "expected result (ls) to contain testing file") -} - -func TestBuilderBuildTag(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildTag_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": `FROM busybox:buildroot-2013.08.1 -TAG testing -RUN touch /testing -PUSH quay.io/testing_project`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - result, err := runContainer(t, client, &docker.Config{ - Image: imageID, - Cmd: []string{"ls", "/"}, - }, nil) - - t.Logf("Got result: %s", result) - - assert.Equal(t, "true", "true", "failed") -} - -func TestBuilderBuildSemverTag(t *testing.T) { - - tempDir, err := 
ioutil.TempDir("/tmp", "rocker_TestBuilderBuildSemverTag_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": `FROM scratch -TAG --semver testing:1.2.3-build123`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - // Vars: VarsFromStrings([]string{"branch=master", "commit=314ad"}), - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - result, err := runContainer(t, client, &docker.Config{ - Image: imageID, - Cmd: []string{"ls", "/"}, - }, nil) - - t.Logf("Got result: %s", result) - - assert.Equal(t, "true", "true", "failed") -} - -func TestBuilderBuildTagLabels(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildTagLabels_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - rockerfileContent := `FROM busybox:buildroot-2013.08.1 -TAG testing -RUN touch /testing -LABEL foo=bar -PUSH quay.io/testing_project` - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": rockerfileContent, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - vars, err := template.VarsFromStrings([]string{"asd=qwe"}) - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - CliVars: vars, - Docker: client, - AddMeta: true, - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - inspect, err := client.InspectImage(imageID) - if err != nil { - t.Fatal(err) - } - - // test inherited labels - assert.Equal(t, "bar", inspect.Config.Labels["foo"]) - - // test rockerfile content - data := &RockerImageData{} - if err := json.Unmarshal([]byte(inspect.Config.Labels["rocker-data"]), data); err != nil { - t.Fatal(err) - } - assert.Equal(t, rockerfileContent, data.Rockerfile) - - // test vars - assert.Equal(t, vars, data.Vars) -} - -func TestBuilderBuildMounts(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildTag_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": `FROM busybox:buildroot-2013.08.1 -MOUNT /app/node_modules /app/bower_components -RUN ls /app > /out -CMD cat /out`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - // Cleanup mount 
containers - defer func() { - for _, mountContainerID := range builder.getAllMountContainerIds() { - if err := client.RemoveContainer(docker.RemoveContainerOptions{ - ID: mountContainerID, - RemoveVolumes: true, - Force: true, - }); err != nil { - t.Log(err) - } - } - }() - - result, err := runContainer(t, client, &docker.Config{ - Image: imageID, - }, nil) - - t.Logf("Got result: %s", result) - - assert.Equal(t, "bower_components\nnode_modules\n", result, "expected both volumes to be mounted") - assert.Equal(t, 1, len(builder.getMountContainerIds()), "expected only one volume container to be created") -} - -func TestBuilderMountFromHost(t *testing.T) { - - wd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - // Use current working directroy as a temp dir to make MOUNT work in boot2docker - tempDir, err := ioutil.TempDir(wd, "rocker_TestBuilderMountFromHost_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": `FROM busybox:buildroot-2013.08.1 -MOUNT .:/src -RUN echo "hello" > /src/test`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - content, err := ioutil.ReadFile(tempDir + "/test") - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, "hello\n", string(content)) -} - -func TestBuilderBuildVars(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildVars_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": `FROM busybox:buildroot-2013.08.1 -RUN echo "version:$version" > /version`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - vars, err := template.VarsFromStrings([]string{"version=125"}) - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Vars: vars, - // Push: true, - Docker: client, - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - result, err := runContainer(t, client, &docker.Config{ - Image: imageID, - Cmd: []string{"cat", "/version"}, - }, nil) - - assert.Equal(t, "version:125\n", result, "failed") -} - -func TestBuilderBuildMultiple(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildMultiple_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/index.js": "console.log('hello')", - "/data/README": "hello", - "/Rockerfile": ` -FROM busybox:buildroot-2013.08.1 -ADD . 
/app -MOUNT /app/node_modules -RUN echo "hehe" > /app/node_modules/some_module && \ - cd /app/node_modules && \ - ln -sf some_module link_to_some_module -EXPORT /app -FROM busybox:buildroot-2013.08.1 -IMPORT /app - `, - }) - if err != nil { - t.Fatal(err) - } - - imageIDs := make(map[string]struct{}) - mounts := make(map[string]struct{}) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - run := func() (imageID string, err error) { - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - UtilizeCache: true, - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - } - - defer func() { - for _, mountContainerID := range builder.getAllMountContainerIds() { - if mountContainerID != "" { - mounts[mountContainerID] = struct{}{} - } - } - }() - - imageID, err = builder.Build() - if err != nil { - return "", err - } - t.Logf("Got imageID: %s", imageID) - - imageIDs[imageID] = struct{}{} - - for _, imageID := range builder.intermediateImages { - imageIDs[imageID] = struct{}{} - } - - return imageID, nil - } - - defer func() { - for imageID := range imageIDs { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - } - }() - - // Cleanup mount containers - defer func() { - for mountContainerID := range mounts { - if err := client.RemoveContainer(docker.RemoveContainerOptions{ - ID: mountContainerID, - RemoveVolumes: true, - Force: true, - }); err != nil { - t.Log(err) - } - } - }() - - imageID1, err := run() - if err != nil { - t.Fatal(err) - } - - fmt.Println("============================================================") - - imageID2, err := run() - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, imageID1, imageID2, "expected images to be equal (valid caching behavior)") -} - -func TestBuilderBuildContainerVolume(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildContainerVolume_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": `FROM busybox:buildroot-2013.08.1 -MOUNT /cache -RUN echo "hello" >> /cache/output.log -RUN cp /cache/output.log /result_cache.log -CMD cat /result_cache.log`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - // Step 1 - - runUtilizeCache := func(utilizeCache bool) (result string, err error) { - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - UtilizeCache: utilizeCache, - Docker: client, - } - - imageID, err := builder.Build() - if err != nil { - return "", err - } - t.Logf("Got imageID: %s", imageID) - - // Cleanup mount containers - defer func() { - for _, mountContainerID := range builder.getAllMountContainerIds() { - if err := client.RemoveContainer(docker.RemoveContainerOptions{ - ID: mountContainerID, - RemoveVolumes: true, - Force: true, - }); err != nil { - t.Log(err) - } - } - }() - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - // Step 2 - - builder2 := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - UtilizeCache: utilizeCache, - Docker: client, - } - - imageID2, err := builder2.Build() - if err != nil { - return "", err - } - t.Logf("Got 
imageID2: %s", imageID2) - - defer func() { - if err := client.RemoveImageExtended(imageID2, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - return runContainer(t, client, &docker.Config{ - Image: imageID2, - }, nil) - } - - result1, err := runUtilizeCache(true) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, "hello\n", result1, "failed") - - result2, err := runUtilizeCache(false) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, "hello\nhello\n", result2, "failed") -} - -func TestBuilderBuildAddCache(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildAddCache_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/data/README": "hello", - "/Rockerfile": ` -FROM busybox:buildroot-2013.08.1 -ADD . /src -RUN ls -la /src -`, - }) - if err != nil { - t.Fatal(err) - } - - var imageIDs []string - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - run := func() (imageID string, err error) { - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - UtilizeCache: true, - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - } - - imageID, err = builder.Build() - if err != nil { - return "", err - } - t.Logf("Got imageID: %s", imageID) - - imageIDs = append(imageIDs, imageID) - - return imageID, nil - } - - defer func() { - for _, imageID := range imageIDs { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - } - }() - - imageID1, err := run() - if err != nil { - t.Fatal(err) - } - - time.Sleep(time.Second) - - imageID2, err := run() - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, imageID1, imageID2, "expected images to be equal (valid caching behavior)") -} - -func TestBuilderBuildRequire(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildRequire_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": `FROM busybox:buildroot-2013.08.1 -REQUIRE version -RUN echo "$version" > /testing -CMD cat /testing`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - run := func(vars []string) (string, error) { - tlpVars, err := template.VarsFromStrings(vars) - if err != nil { - return "", err - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - Vars: tlpVars, - } - - imageID, err := builder.Build() - if err != nil { - return "", err - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - return runContainer(t, client, &docker.Config{ - Image: imageID, - }, nil) - } - - _, err1 := run([]string{}) - result, err2 := run([]string{"version=123"}) - - assert.Equal(t, "Var $version is required but not set", err1.Error()) - assert.Nil(t, err2, "expected second run to not give error") - assert.Equal(t, "123\n", result) -} - -func TestBuilderBuildVar(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildVar_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ 
- "/Rockerfile": `FROM busybox:buildroot-2013.08.1 -VAR test=true -RUN touch /testing -RUN if [ "$test" == "true" ] ; then echo "done test" > /testing; fi -CMD cat /testing`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - run := func(vars []string) (string, error) { - tplVars, err := template.VarsFromStrings(vars) - if err != nil { - return "", err - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - Vars: tplVars, - } - - imageID, err := builder.Build() - if err != nil { - return "", err - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - return runContainer(t, client, &docker.Config{ - Image: imageID, - }, nil) - } - - result1, err := run([]string{}) - if err != nil { - t.Fatal(err) - } - - result2, err := run([]string{"test=false"}) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, "done test\n", result1) - assert.Equal(t, "", result2) -} - -func TestBuilderBuildAttach(t *testing.T) { - t.Skip() - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildAttach_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": `FROM busybox:buildroot-2013.08.1 -CMD ["/bin/sh"] -ATTACH --name=test-attach ["ls", "-la"]`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - InStream: os.Stdin, - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - Attach: true, - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() -} - -func TestBuilderEnsureImage(t *testing.T) { - t.Skip() - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - Auth: &docker.AuthConfiguration{}, - } - - image := "busybox:buildroot-2013.08.1" - - if err := builder.ensureImage(image, "testing"); err != nil { - t.Fatal(err) - } - - assert.Equal(t, "", "") -} - -func TestBuilderEnsureContainer(t *testing.T) { - t.Skip() - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - Auth: &docker.AuthConfiguration{}, - } - - containerConfig := &docker.Config{ - Image: "grammarly/rsync-static:1", - } - containerName := "rocker_TestBuilderEnsureContainer" - - defer func() { - if err := client.RemoveContainer(docker.RemoveContainerOptions{ID: containerName, Force: true}); err != nil { - t.Fatal(err) - } - }() - - if _, err := builder.ensureContainer(containerName, containerConfig, "testing"); err != nil { - t.Fatal(err) - } - - assert.Equal(t, "", "") -} - -func TestBuilderBuildGitWarning(t *testing.T) { - t.Skip() - - tempDir, err := ioutil.TempDir("/tmp", 
"rocker_TestBuilderBuildGitWarning_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/.git/HEAD": "hello", - "/testing": "hello2", - "/Rockerfile": `FROM busybox:buildroot-2013.08.1 -ADD . /`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - result, err := runContainer(t, client, &docker.Config{ - Image: imageID, - }, nil) - - t.Logf("Got result: %q", result) - - assert.Contains(t, result, "testing", "expected result (ls) to contain testing file") -} - -func TestBuilderBuildInclude(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildInclude_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/nodejs": ` -RUN touch /test/bin/nodejs -RUN touch /test/bin/npm -`, - "/java": ` -RUN touch /test/bin/java -RUN touch /test/bin/gradle -`, - "/Rockerfile": ` -FROM busybox:buildroot-2013.08.1 -RUN mkdir -p /test/bin -INCLUDE nodejs -INCLUDE java -CMD ["ls", "/test/bin"] -`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - result, err := runContainer(t, client, &docker.Config{ - Image: imageID, - }, nil) - - t.Logf("Got result: %q", result) - - assert.Equal(t, "gradle\njava\nnodejs\nnpm\n", result, "expected result (ls) to contain included files") -} - -func TestBuilderImportFromScratch(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderImportFromScratch_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": ` -FROM busybox:buildroot-2013.08.1 -RUN mkdir -p /zzz && echo "hi" > /zzz/lalala -EXPORT zzz / - -FROM scratch -IMPORT zzz / -CMD ["true"] -`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - // Create data volume container with scratch image - c, err := client.CreateContainer(docker.CreateContainerOptions{ - Config: &docker.Config{ - Image: imageID, - Volumes: map[string]struct{}{ - "/zzz": struct{}{}, - }, - }, - }) - if err != nil { - t.Fatal(err) - } 
- defer func() { - if err := client.RemoveContainer(docker.RemoveContainerOptions{ID: c.ID, RemoveVolumes: true, Force: true}); err != nil { - t.Log(err) - } - }() - - result, err := runContainer(t, client, &docker.Config{ - Image: "busybox:buildroot-2013.08.1", - Cmd: []string{"/bin/sh", "-c", "cat /zzz/lalala"}, - }, &docker.HostConfig{ - VolumesFrom: []string{c.ID}, - }) - - t.Logf("Got result: %q", result) - - assert.Equal(t, "hi\n", result) -} - -func runContainer(t *testing.T, client *docker.Client, config *docker.Config, hostConfig *docker.HostConfig) (result string, err error) { - if config == nil { - config = &docker.Config{} - } - if hostConfig == nil { - hostConfig = &docker.HostConfig{} - } - - opts := docker.CreateContainerOptions{ - Config: config, - HostConfig: hostConfig, - } - - container, err := client.CreateContainer(opts) - if err != nil { - return "", err - } - - // remove container after testing - defer func() { - if err2 := client.RemoveContainer(docker.RemoveContainerOptions{ID: container.ID, Force: true}); err2 != nil && err == nil { - err = err2 - } - }() - - success := make(chan struct{}) - var buf bytes.Buffer - - attachOpts := docker.AttachToContainerOptions{ - Container: container.ID, - OutputStream: &buf, - ErrorStream: &buf, - Stream: true, - Stdout: true, - Stderr: true, - Success: success, - } - go client.AttachToContainer(attachOpts) - - success <- <-success - - err = client.StartContainer(container.ID, &docker.HostConfig{}) - if err != nil { - return "", err - } - - statusCode, err := client.WaitContainer(container.ID) - if err != nil { - return "", err - } - - if statusCode != 0 { - return "", fmt.Errorf("Failed to run container, exit with code %d", statusCode) - } - - return buf.String(), nil -} diff --git a/src/rocker/build/cache.go b/src/rocker/build/cache.go new file mode 100644 index 00000000..888957eb --- /dev/null +++ b/src/rocker/build/cache.go @@ -0,0 +1,105 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package build + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "time" + + log "github.com/Sirupsen/logrus" +) + +// Cache interface describes a cache backend +type Cache interface { + Get(s State) (s2 *State, err error) + Put(s State) error + Del(s State) error +} + +// CacheFS implements file based cache backend +type CacheFS struct { + root string +} + +// NewCacheFS creates a file based cache backend +func NewCacheFS(root string) *CacheFS { + return &CacheFS{ + root: root, + } +} + +// Get fetches cache +func (c *CacheFS) Get(s State) (res *State, err error) { + match := filepath.Join(c.root, s.ImageID) + + latestTime := time.Unix(0, 0) + + err = filepath.Walk(match, func(path string, info os.FileInfo, err error) error { + if err != nil && os.IsNotExist(err) { + return nil + } + if info.IsDir() { + return nil + } + + s2 := State{} + data, err := ioutil.ReadFile(path) + if err != nil { + return err + } + if err := json.Unmarshal(data, &s2); err != nil { + return err + } + + log.Debugf("CACHE COMPARE %s %s %q %q", s.ImageID, s2.ImageID, s.Commits, s2.Commits) + + if s.Equals(s2) && info.ModTime().After(latestTime) { + latestTime = info.ModTime() + res = &s2 + } + + return nil + }) + + return +} + +// Put stores cache +func (c *CacheFS) Put(s State) error { + log.Debugf("CACHE PUT %s %s %q", s.ParentID, s.ImageID, s.Commits) + + fileName := filepath.Join(c.root, s.ParentID, s.ImageID) + ".json" + if err := os.MkdirAll(filepath.Dir(fileName), 0755); err != nil { + return err + } + data, err := json.Marshal(s) + if err != nil { + return err + } + return ioutil.WriteFile(fileName, data, 0644) +} + +// Del deletes cache +func (c *CacheFS) Del(s State) error { + log.Debugf("CACHE DELETE %s %s %q", s.ParentID, s.ImageID, s.Commits) + + fileName := filepath.Join(c.root, s.ParentID, s.ImageID) + ".json" + return os.RemoveAll(fileName) +} diff --git a/src/rocker/build/cache_test.go b/src/rocker/build/cache_test.go new file mode 100644 index 00000000..9e534745 --- /dev/null +++ b/src/rocker/build/cache_test.go @@ -0,0 +1,68 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package build + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCache_Basic(t *testing.T) { + tmpDir := cacheTestTmpDir(t) + defer os.RemoveAll(tmpDir) + + c := NewCacheFS(tmpDir) + + s := State{ + ParentID: "123", + ImageID: "456", + } + if err := c.Put(s); err != nil { + t.Fatal(err) + } + + s2 := State{ + ImageID: "123", + } + res, err := c.Get(s2) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, "456", res.ImageID) + + s3 := State{ + ImageID: "789", + } + res2, err := c.Get(s3) + if err != nil { + t.Fatal(err) + } + + assert.Nil(t, res2) +} + +func cacheTestTmpDir(t *testing.T) string { + tmpDir, err := ioutil.TempDir("", "rocker-cache-test") + if err != nil { + t.Fatal(err) + } + return tmpDir +} diff --git a/src/rocker/build/client.go b/src/rocker/build/client.go new file mode 100644 index 00000000..b1d619df --- /dev/null +++ b/src/rocker/build/client.go @@ -0,0 +1,502 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package build + +import ( + "bytes" + "fmt" + "io" + "os" + "os/signal" + + "regexp" + "rocker/dockerclient" + "rocker/imagename" + + "github.com/docker/docker/pkg/units" + + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/term" + "github.com/fsouza/go-dockerclient" + "github.com/kr/pretty" + + log "github.com/Sirupsen/logrus" +) + +// Client interface +type Client interface { + InspectImage(name string) (*docker.Image, error) + PullImage(name string) error + ListImages() (images []*imagename.ImageName, err error) + ListImageTags(name string) (images []*imagename.ImageName, err error) + RemoveImage(imageID string) error + TagImage(imageID, imageName string) error + PushImage(imageName string) (digest string, err error) + EnsureImage(imageName string) error + CreateContainer(state State) (id string, err error) + RunContainer(containerID string, attachStdin bool) error + CommitContainer(state State, message string) (img *docker.Image, err error) + RemoveContainer(containerID string) error + UploadToContainer(containerID string, stream io.Reader, path string) error + EnsureContainer(containerName string, config *docker.Config, purpose string) (containerID string, err error) + ResolveHostPath(path string) (resultPath string, err error) +} + +// DockerClient implements the client that works with a docker socket +type DockerClient struct { + client *docker.Client + auth docker.AuthConfiguration +} + +var ( + captureDigest = regexp.MustCompile("digest:\\s*(sha256:[a-f0-9]{64})") +) + +// NewDockerClient makes a new client that works with a docker socket +func NewDockerClient(dockerClient *docker.Client, auth docker.AuthConfiguration) *DockerClient { + return &DockerClient{ + client: dockerClient, + auth: auth, + } +} + +// InspectImage inspects docker image +// it does not give an error when image not found, but returns nil instead +func (c *DockerClient) InspectImage(name string) (img *docker.Image, err error) { + // We simply 
return nil in case image not found + if img, err = c.client.InspectImage(name); err == docker.ErrNoSuchImage { + return nil, nil + } + return img, err +} + +// PullImage pulls docker image +func (c *DockerClient) PullImage(name string) error { + + var ( + image = imagename.NewFromString(name) + pipeReader, pipeWriter = io.Pipe() + def = log.StandardLogger() + fdOut, isTerminalOut = term.GetFdInfo(def.Out) + out = def.Out + errch = make(chan error) + ) + + if !isTerminalOut { + out = def.Writer() + } + + opts := docker.PullImageOptions{ + Repository: image.NameWithRegistry(), + Registry: image.Registry, + Tag: image.GetTag(), + OutputStream: pipeWriter, + RawJSONStream: true, + } + + log.Infof("| Pull image %s", image) + log.Debugf("Pull image %s with options: %# v", image, opts) + + go func() { + errch <- jsonmessage.DisplayJSONMessagesStream(pipeReader, out, fdOut, isTerminalOut) + }() + + if err := c.client.PullImage(opts, c.auth); err != nil { + return err + } + + return <-errch +} + +// ListImages lists all pulled images in the local docker registry +func (c *DockerClient) ListImages() (images []*imagename.ImageName, err error) { + + var dockerImages []docker.APIImages + if dockerImages, err = c.client.ListImages(docker.ListImagesOptions{}); err != nil { + return + } + + images = []*imagename.ImageName{} + for _, image := range dockerImages { + for _, repoTag := range image.RepoTags { + images = append(images, imagename.NewFromString(repoTag)) + } + } + + return +} + +// ListImageTags returns the list of images instances obtained from all tags existing in the registry +func (c *DockerClient) ListImageTags(name string) (images []*imagename.ImageName, err error) { + return imagename.RegistryListTags(imagename.NewFromString(name)) +} + +// RemoveImage removes docker image +func (c *DockerClient) RemoveImage(imageID string) error { + log.Infof("| Remove image %.12s", imageID) + + opts := docker.RemoveImageOptions{ + Force: true, + NoPrune: false, + } + return c.client.RemoveImageExtended(imageID, opts) +} + +// CreateContainer creates docker container +func (c *DockerClient) CreateContainer(s State) (string, error) { + + s.Config.Image = s.ImageID + + // TODO: assign human readable name? 
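+ // Note: the container config comes straight from the build state, while the + // HostConfig is kept under s.NoCache, presumably so that host-specific run + // settings do not take part in cache comparison.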
+ + opts := docker.CreateContainerOptions{ + Config: &s.Config, + HostConfig: &s.NoCache.HostConfig, + } + + log.Debugf("Create container: %# v", pretty.Formatter(opts)) + + container, err := c.client.CreateContainer(opts) + if err != nil { + return "", err + } + + imageStr := fmt.Sprintf("(image %.12s)", s.ImageID) + if s.ImageID == "" { + imageStr = "(from scratch)" + } + + log.Infof("| Created container %.12s %s", container.ID, imageStr) + + return container.ID, nil +} + +// RunContainer runs docker container and optionally attaches stdin +func (c *DockerClient) RunContainer(containerID string, attachStdin bool) error { + + var ( + success = make(chan struct{}) + finished = make(chan struct{}, 1) + sigch = make(chan os.Signal, 1) + errch = make(chan error) + + // Wrap output streams with logger + def = log.StandardLogger() + outLogger = &log.Logger{ + Out: def.Out, + Formatter: NewContainerFormatter(containerID, log.InfoLevel), + Level: def.Level, + } + errLogger = &log.Logger{ + Out: def.Out, + Formatter: NewContainerFormatter(containerID, log.ErrorLevel), + Level: def.Level, + } + + in = os.Stdin + fdIn, isTerminalIn = term.GetFdInfo(in) + ) + + attachOpts := docker.AttachToContainerOptions{ + Container: containerID, + OutputStream: outLogger.Writer(), + ErrorStream: errLogger.Writer(), + Stdout: true, + Stderr: true, + Stream: true, + Success: success, + } + + // Used by ATTACH + if attachStdin { + log.Infof("| Attach stdin to the container %.12s", containerID) + + if !isTerminalIn { + return fmt.Errorf("Cannot attach to a container on non-tty input") + } + + attachOpts.InputStream = readerVoidCloser{in} + attachOpts.OutputStream = os.Stdout + attachOpts.ErrorStream = os.Stderr + attachOpts.Stdin = true + attachOpts.RawTerminal = true + } + + // We want to debug the final attach options before setting the raw terminal + log.Debugf("Attach to container with options: %# v", attachOpts) + + if attachStdin { + oldState, err := term.SetRawTerminal(fdIn) + if err != nil { + return err + } + defer term.RestoreTerminal(fdIn, oldState) + } + + go func() { + if err := c.client.AttachToContainer(attachOpts); err != nil { + select { + case <-finished: + // Ignore any attach errors when we have finished already. + // This may happen if we attach stdin, the container exits, and then more input arrives on stdin. + // It is the case when multiple ATTACH commands are used in a single Rockerfile. + // The problem though is that we cannot close stdin to make it available for the subsequent ATTACH; + // therefore, the hijack goroutine from the previous ATTACH will hang until input is received, and only + // then will it fire an error. + // That is ok for `rocker` since it is not a daemon, but rather a one-off command. + // + // Also, there is still a problem that `rocker` loses the second character from stdin on a second ATTACH. + // But let's consider it a corner case. + default: + // Print the error. We cannot return it because the main routine is hanging on WaitContainer + log.Errorf("Got error while attaching to container %.12s: %s", containerID, err) + } + } + }() + + success <- <-success + + // TODO: support options for container resource constraints like `docker build` has + + if err := c.client.StartContainer(containerID, &docker.HostConfig{}); err != nil { + return err + } + + if attachStdin { + if err := c.monitorTtySize(containerID, os.Stdout); err != nil { + return fmt.Errorf("Failed to monitor TTY size for container %.12s, error: %s", containerID, err) + } + } + + // TODO: move signal handling to the builder? + + signal.Notify(sigch, os.Interrupt) + + go func() { + statusCode, err := c.client.WaitContainer(containerID) + // log.Debugf("Wait finished, status %q error %q", statusCode, err) + if err != nil { + errch <- err + // return after the first send; a second send would block forever on the unbuffered channel + return + } + if statusCode != 0 { + errch <- fmt.Errorf("Container %.12s exited with code %d", containerID, statusCode) + return + } + errch <- nil + }() + + select { + case err := <-errch: + // indicate 'finished' so the `attach` goroutine will not give any errors + finished <- struct{}{} + if err != nil { + return err + } + case <-sigch: + // TODO: Removing container twice for some reason + log.Infof("Received SIGINT, remove current container...") + if err := c.RemoveContainer(containerID); err != nil { + log.Errorf("Failed to remove container: %s", err) + } + // TODO: send signal to builder.Run() and have a proper cleanup + os.Exit(2) + } + + return nil +} + +// CommitContainer commits docker container +func (c *DockerClient) CommitContainer(s State, message string) (*docker.Image, error) { + commitOpts := docker.CommitContainerOptions{ + Container: s.NoCache.ContainerID, + Message: message, + Run: &s.Config, + } + + log.Debugf("Commit container: %# v", pretty.Formatter(commitOpts)) + + image, err := c.client.CommitContainer(commitOpts) + if err != nil { + return nil, err + } + + // Inspect the image to get the real size + log.Debugf("Inspect image %s", image.ID) + + if image, err = c.client.InspectImage(image.ID); err != nil { + return nil, err + } + + size := fmt.Sprintf("%s (+%s)", + units.HumanSize(float64(image.VirtualSize)), + units.HumanSize(float64(image.Size)), + ) + + log.WithFields(log.Fields{ + "size": size, + }).Infof("| Result image is %.12s", image.ID) + + return image, nil +} + +// RemoveContainer removes docker container +func (c *DockerClient) RemoveContainer(containerID string) error { + log.Infof("| Removing container %.12s", containerID) + + opts := docker.RemoveContainerOptions{ + ID: containerID, + Force: true, + RemoveVolumes: true, + } + + return c.client.RemoveContainer(opts) +} + +// UploadToContainer uploads files to a docker container +func (c *DockerClient) UploadToContainer(containerID string, stream io.Reader, path string) error { + log.Infof("| Uploading files to container %.12s", containerID) + + opts := docker.UploadToContainerOptions{ + InputStream: stream, + Path: path, + NoOverwriteDirNonDir: false, + } + + return c.client.UploadToContainer(containerID, opts) +} + +// TagImage adds a tag to the image +func (c *DockerClient) TagImage(imageID, imageName string) error { + img := imagename.NewFromString(imageName) + + log.Infof("| Tag %.12s -> %s", imageID, img) + + opts := docker.TagImageOptions{ + Repo: img.NameWithRegistry(), + Tag: img.GetTag(), + Force: true, + } + + log.Debugf("Tag image %s with options: %# v", imageID, opts) + + return c.client.TagImage(imageID, opts) +} + +// PushImage pushes the image +func (c
*DockerClient) PushImage(imageName string) (digest string, err error) { + var ( + img = imagename.NewFromString(imageName) + + buf bytes.Buffer + pipeReader, pipeWriter = io.Pipe() + outStream = io.MultiWriter(pipeWriter, &buf) + def = log.StandardLogger() + fdOut, isTerminalOut = term.GetFdInfo(def.Out) + out = def.Out + + opts = docker.PushImageOptions{ + Name: img.NameWithRegistry(), + Tag: img.GetTag(), + Registry: img.Registry, + OutputStream: outStream, + RawJSONStream: true, + } + ) + + if !isTerminalOut { + out = def.Writer() + } + + log.Infof("| Push %s", img) + + log.Debugf("Push with options: %# v", opts) + + // TODO: DisplayJSONMessagesStream may fail even though client.PushImage runs without errors + go func() { + if err := jsonmessage.DisplayJSONMessagesStream(pipeReader, out, fdOut, isTerminalOut); err != nil { + log.Errorf("Failed to process json stream, error %s", err) + } + }() + + if err := c.client.PushImage(opts, c.auth); err != nil { + return "", err + } + pipeWriter.Close() + + // Parsing the output is the best way we have so far to get the pushed image digest + matches := captureDigest.FindStringSubmatch(buf.String()) + if len(matches) > 0 { + digest = matches[1] + } + + return digest, nil +} + +// ResolveHostPath is a proxy for dockerclient.ResolveHostPath +func (c *DockerClient) ResolveHostPath(path string) (resultPath string, err error) { + return dockerclient.ResolveHostPath(path, c.client) +} + +// EnsureImage checks if the image exists and pulls it if not +func (c *DockerClient) EnsureImage(imageName string) (err error) { + + var img *docker.Image + if img, err = c.client.InspectImage(imageName); err != nil && err != docker.ErrNoSuchImage { + return err + } + if img != nil { + return nil + } + + return c.PullImage(imageName) +} + +// EnsureContainer checks whether a container with the specified name exists +// and creates it otherwise +func (c *DockerClient) EnsureContainer(containerName string, config *docker.Config, purpose string) (containerID string, err error) { + + // Check if the container exists + container, err := c.client.InspectContainer(containerName) + + if _, ok := err.(*docker.NoSuchContainer); !ok && err != nil { + return "", err + } + if container != nil { + return container.ID, nil + } + + // No data volume container for this build, create it + + if err := c.EnsureImage(config.Image); err != nil { + return "", fmt.Errorf("Failed to check image %s, error: %s", config.Image, err) + } + + log.Infof("| Create container: %s for %s", containerName, purpose) + + opts := docker.CreateContainerOptions{ + Name: containerName, + Config: config, + } + + log.Debugf("Create container options %# v", opts) + + container, err = c.client.CreateContainer(opts) + if err != nil { + return "", fmt.Errorf("Failed to create container %s from image %s, error: %s", containerName, config.Image, err) + } + + return container.ID, err +} diff --git a/src/rocker/build/tty.go b/src/rocker/build/client_tty.go similarity index 54% rename from src/rocker/build/tty.go rename to src/rocker/build/client_tty.go index f198fca4..78b15810 100644 --- a/src/rocker/build/tty.go +++ b/src/rocker/build/client_tty.go @@ -5,28 +5,29 @@ package build import ( - "fmt" + "io" "os" gosignal "os/signal" "runtime" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/term" ) -func (builder *Builder) monitorTtySize(id string) error { - builder.resizeTty(id) +func (c *DockerClient) monitorTtySize(id string, out io.Writer) error { + c.resizeTty(id, out) if runtime.GOOS == "windows" { go func() { - 
prevH, prevW := builder.getTtySize() + prevH, prevW := c.getTtySize(out) for { time.Sleep(time.Millisecond * 250) - h, w := builder.getTtySize() + h, w := c.getTtySize(out) if prevW != w || prevH != h { - builder.resizeTty(id) + c.resizeTty(id, out) } prevH = h prevW = w @@ -37,34 +38,40 @@ func (builder *Builder) monitorTtySize(id string) error { gosignal.Notify(sigchan, signal.SIGWINCH) go func() { for range sigchan { - builder.resizeTty(id) + c.resizeTty(id, out) } }() } return nil } -func (builder *Builder) resizeTty(id string) { - height, width := builder.getTtySize() +func (c *DockerClient) resizeTty(id string, out io.Writer) { + height, width := c.getTtySize(out) if height == 0 && width == 0 { return } - if err := builder.Docker.ResizeContainerTTY(id, height, width); err != nil { - fmt.Fprintf(builder.OutStream, "Failed to resize container TTY %s, error: %s\n", id, err) + if err := c.client.ResizeContainerTTY(id, height, width); err != nil { + log.Errorf("Failed to resize container TTY %.12s, error: %s\n", id, err) } } -func (builder *Builder) getTtySize() (int, int) { - if !builder.isTerminalOut { +func (c *DockerClient) getTtySize(out io.Writer) (int, int) { + var ( + fdOut, isTerminalOut = term.GetFdInfo(out) + ) + + if !isTerminalOut { return 0, 0 } - ws, err := term.GetWinsize(builder.fdOut) + + ws, err := term.GetWinsize(fdOut) if err != nil { - fmt.Fprintf(builder.OutStream, "Error getting TTY size: %s\n", err) + log.Errorf("Error getting TTY size: %s\n", err) if ws == nil { return 0, 0 } } + return int(ws.Height), int(ws.Width) } diff --git a/src/rocker/build/commands.go b/src/rocker/build/commands.go index 46dbcf32..f6917a7d 100644 --- a/src/rocker/build/commands.go +++ b/src/rocker/build/commands.go @@ -17,552 +17,1376 @@ package build import ( - "encoding/json" "fmt" "io/ioutil" "os" - "os/user" "path" "path/filepath" - "sort" - "strings" - - "rocker/dockerclient" + "regexp" "rocker/imagename" - "rocker/parser" - "rocker/template" + "rocker/shellparser" "rocker/util" + "sort" + "strings" + "time" + log "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/nat" + "github.com/docker/docker/pkg/units" "github.com/fsouza/go-dockerclient" + "github.com/go-yaml/yaml" + "github.com/kr/pretty" ) -// cmdRun implements RUN command -// If there were no MOUNTs before, rocker falls back to `docker build` to run it -func (builder *Builder) cmdRun(args []string, attributes map[string]bool, flags map[string]string, original string) (err error) { - cmd := handleJSONArgs(args, attributes) +// ConfigCommand configuration parameters for any command +type ConfigCommand struct { + name string + args []string + attrs map[string]bool + flags map[string]string + original string + isOnbuild bool +} + +// Command interface describes a command that is executed by the build +type Command interface { + // Execute does the command execution and returns the modified state. + // Note that here we use State not by reference because we want + // it to be immutable. In the future, it may be encoded/decoded from JSON + // and passed to the external command implementations. + Execute(b *Build) (State, error) - if !attributes["json"] { - cmd = append([]string{"/bin/sh", "-c"}, cmd...) + // Returns true if the command should be executed + ShouldRun(b *Build) (bool, error) + + // String returns the human readable string representation of the command + String() string +} + +// EnvReplacableCommand interface describes a command that can substitute ENV +// variables into its own arguments +type EnvReplacableCommand interface { + ReplaceEnv(env []string) error +} + +// NewCommand makes a new command according to the given configuration +func NewCommand(cfg ConfigCommand) (cmd Command, err error) { + // TODO: use reflection? + switch cfg.name { + case "from": + cmd = &CommandFrom{cfg} + case "maintainer": + cmd = &CommandMaintainer{cfg} + case "run": + cmd = &CommandRun{cfg} + case "attach": + cmd = &CommandAttach{cfg} + case "env": + cmd = &CommandEnv{cfg} + case "label": + cmd = &CommandLabel{cfg} + case "workdir": + cmd = &CommandWorkdir{cfg} + case "tag": + cmd = &CommandTag{cfg} + case "push": + cmd = &CommandPush{cfg} + case "copy": + cmd = &CommandCopy{cfg} + case "add": + cmd = &CommandAdd{cfg} + case "cmd": + cmd = &CommandCmd{cfg} + case "entrypoint": + cmd = &CommandEntrypoint{cfg} + case "expose": + cmd = &CommandExpose{cfg} + case "volume": + cmd = &CommandVolume{cfg} + case "user": + cmd = &CommandUser{cfg} + case "onbuild": + cmd = &CommandOnbuild{cfg} + case "mount": + cmd = &CommandMount{cfg} + case "export": + cmd = &CommandExport{cfg} + case "import": + cmd = &CommandImport{cfg} + default: + return nil, fmt.Errorf("Unknown command: %s", cfg.name) + } + + if cfg.isOnbuild { + cmd = &CommandOnbuildWrap{cmd} } - return builder.runAndCommit(cmd, "run") + return cmd, nil } -// cmdMount implements MOUNT command -// TODO: document behavior of cmdMount -func (builder *Builder) cmdMount(args []string, attributes map[string]bool, flags map[string]string, original string) error { - if len(args) == 0 { - return fmt.Errorf("Command is missing value: %s", original) +// CommandFrom implements FROM +type CommandFrom struct { + cfg ConfigCommand +} + +// String returns the human readable string representation of the command +func (c *CommandFrom) String() string { + return c.cfg.original +} + +// ShouldRun returns true if the command should be executed +func (c *CommandFrom) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +// Execute runs the command +func (c *CommandFrom) Execute(b *Build) (s State, err error) { + // TODO: for "scratch" image we may use /images/create + + if len(c.cfg.args) != 1 { + return s, fmt.Errorf("FROM requires one argument") + } + + var ( + img *docker.Image + name = c.cfg.args[0] + ) + + if name == "scratch" { + s.NoBaseImage = true + return s, nil + } + + if img, err = b.lookupImage(name); err != nil { + return s, fmt.Errorf("FROM error: %s", err) + } + + if img == nil { + return s, fmt.Errorf("FROM: image %s not found", name) + } - var err error + // We want to report the size of the FROM image. It would be better to do this + // from the client, but we don't know how to do it without duplicating + // InspectImage calls and adding unnecessary functions - if mount.src, err = dockerclient.ResolveHostPath(mount.src, builder.Docker); err != nil { - return err - } + log.WithFields(log.Fields{ + "size": units.HumanSize(float64(img.VirtualSize)), + }).Infof("| Image %.12s", img.ID) + + s = b.state + s.ImageID = img.ID + s.Config = docker.Config{} + + if img.Config != nil { + s.Config = *img.Config + } + + b.ProducedSize = 0 + b.VirtualSize = img.VirtualSize + + // If we don't have OnBuild triggers, then we are done + if len(s.Config.OnBuild) == 0 { + return s, nil + } + + log.Infof("| Found %d ONBUILD triggers", len(s.Config.OnBuild)) + + // Remove them from the config, since the config will be committed. + s.InjectCommands = s.Config.OnBuild + s.Config.OnBuild = []string{} + + return s, nil +} + +// CommandMaintainer implements MAINTAINER +type CommandMaintainer struct { + cfg ConfigCommand +} + +// String returns the human readable string representation of the command +func (c *CommandMaintainer) String() string { + return c.cfg.original +} + +// ShouldRun returns true if the command should be executed +func (c *CommandMaintainer) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +// Execute runs the command +func (c *CommandMaintainer) Execute(b *Build) (State, error) { + if len(c.cfg.args) != 1 { + return b.state, fmt.Errorf("MAINTAINER requires exactly one argument") + } + + // We don't see any sense in doing a commit here, the way Docker does + + return b.state, nil +} + +// CommandCleanup cleans the builder state before the next FROM +type CommandCleanup struct { + final bool + tagged bool +} + +// String returns the human readable string representation of the command +func (c *CommandCleanup) String() string { + return "Cleaning up" +} + +// ShouldRun returns true if the command should be executed +func (c *CommandCleanup) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +// Execute runs the command +func (c *CommandCleanup) Execute(b *Build) (State, error) { + s := b.state + + if b.cfg.NoGarbage && !c.tagged && s.ImageID != "" && s.ProducedImage { + if err := b.client.RemoveImage(s.ImageID); err != nil { + return s, err + } + } + + // Cleanup state + dirtyState := s + s = NewState(b) + + // Keep some stuff between FROMs + s.ExportsID = dirtyState.ExportsID + + // For final cleanup we want to keep imageID + if c.final { + s.ImageID = dirtyState.ImageID + } else { + log.Infof("====================================") + } + + return s, nil +} + +// CommandCommit commits collected changes +type CommandCommit struct{} + +// String returns the human readable string representation of the command +func (c *CommandCommit) String() string { + return "Commit changes" +} + +// ShouldRun returns true if the command should be executed +func (c *CommandCommit) ShouldRun(b *Build) (bool, error) { + return b.state.GetCommits() != "", nil +} + +// Execute runs the command +func (c *CommandCommit) Execute(b *Build) (s State, err error) { + s = b.state + + commits := s.GetCommits() + if commits == "" { + return s, nil + } - + if s.ImageID == "" && !s.NoBaseImage { + return s, fmt.Errorf("Please provide a source image with `from` prior to commit") } - // TODO: read flags - useCache := false - - newMounts := []*builderMount{} - newVolumeMounts := []*builderMount{} - - for _, arg := range args { - var mount builderMount - if strings.Contains(arg, ":") { - pair := strings.SplitN(arg, ":", 2) - mount = builderMount{cache: useCache, src: pair[0], dest: pair[1]} - } else { - mount = builderMount{cache: useCache, dest: arg} - } - - if mount.src == "" { - newVolumeMounts = append(newVolumeMounts, &mount) - } else { - // Process relative paths in volumes - if strings.HasPrefix(mount.src, "~") { - mount.src = strings.Replace(mount.src, "~", os.Getenv("HOME"), 1) - } - if !path.IsAbs(mount.src) { - mount.src = path.Join(builder.ContextDir, mount.src) - } - mount.origSrc = mount.src + // TODO: ? + // if len(commits) == 0 && s.NoCache.ContainerID == "" { log.Infof("| Skip") + + // TODO: verify that we need to check cache in commit only for + // non-container actions + + if s.NoCache.ContainerID == "" { + + // Check cache + var hit bool + s, hit, err = b.probeCache(s) + if err != nil { + return s, err } + if hit { + return s, nil } - newMounts = append(newMounts, &mount) + origCmd := s.Config.Cmd + s.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + commits} + + if s.NoCache.ContainerID, err = b.client.CreateContainer(s); err != nil { + return s, err } - // For volume mounts we need to create (or use existing) volume container - if len(newVolumeMounts) > 0 { - // Collect destinations and sort them
alphabetically - // so changing the order on MOUNT commend does not have any effect - dests := make([]string, len(newVolumeMounts)) - containerVolumes := make(map[string]struct{}) + // TODO: ? + // if len(commits) == 0 && s.NoCache.ContainerID == "" { log.Infof("| Skip") + + // TODO: verify that we need to check cache in commit only for + // a non-container actions - for i, mount := range newVolumeMounts { - dests[i] = mount.dest - containerVolumes[mount.dest] = struct{}{} + if s.NoCache.ContainerID == "" { + + // Check cache + var hit bool + s, hit, err = b.probeCache(s) + if err != nil { + return s, err } - sort.Strings(dests) - - volumeContainerName := builder.mountsContainerName(dests) - - containerConfig := &docker.Config{ - Image: busyboxImage, - Volumes: containerVolumes, - Labels: map[string]string{ - "Volumes": strings.Join(dests, ":"), - "Rockerfile": builder.Rockerfile, - "ImageId": builder.imageID, - }, + if hit { + return s, nil } - container, err := builder.ensureContainer(volumeContainerName, containerConfig, strings.Join(dests, ",")) - if err != nil { - return err + origCmd := s.Config.Cmd + s.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + commits} + + if s.NoCache.ContainerID, err = b.client.CreateContainer(s); err != nil { + return s, err } - // Assing volume container to the list of volume mounts - for _, mount := range newVolumeMounts { - mount.containerID = container.ID + s.Config.Cmd = origCmd + } + + defer func(id string) { + s.CleanCommits() + if err := b.client.RemoveContainer(id); err != nil { + log.Errorf("Failed to remove temporary container %.12s, error: %s", id, err) } + }(s.NoCache.ContainerID) + + var img *docker.Image + if img, err = b.client.CommitContainer(s, commits); err != nil { + return s, err } - mountIds := make([]string, len(newMounts)) + s.NoCache.ContainerID = "" + s.ParentID = s.ImageID + s.ImageID = img.ID + s.ProducedImage = true - for i, mount := range newMounts { - builder.addMount(*mount) - mountIds[i] = mount.String() + if b.cache != nil { + if err := b.cache.Put(s); err != nil { + return s, err + } } - // TODO: check is useCache flag enabled, so we have to make checksum of the directory + // Store some stuff to the build + b.ProducedSize += img.Size + b.VirtualSize = img.VirtualSize + + return s, nil +} + +// CommandRun implements RUN +type CommandRun struct { + cfg ConfigCommand +} - if err := builder.commitContainer("", builder.Config.Cmd, fmt.Sprintf("MOUNT %q", mountIds)); err != nil { - return err +// String returns the human readable string representation of the command +func (c *CommandRun) String() string { + return c.cfg.original +} + +// ShouldRun returns true if the command should be executed +func (c *CommandRun) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +// Execute runs the command +func (c *CommandRun) Execute(b *Build) (s State, err error) { + s = b.state + + if s.ImageID == "" && !s.NoBaseImage { + return s, fmt.Errorf("Please provide a source image with `FROM` prior to run") } - return nil + cmd := handleJSONArgs(c.cfg.args, c.cfg.attrs) + + if !c.cfg.attrs["json"] { + cmd = append([]string{"/bin/sh", "-c"}, cmd...) 
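+ // (shell form: as with Dockerfile's RUN, a non-JSON command is wrapped in /bin/sh -c)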
+ } + + s.Commit("RUN %q", cmd) + + // Check cache + s, hit, err := b.probeCache(s) + if err != nil { + return s, err + } + if hit { + return s, nil + } + + // TODO: test with ENTRYPOINT + + // We run this command in the container using CMD + origCmd := s.Config.Cmd + origEntrypoint := s.Config.Entrypoint + s.Config.Cmd = cmd + s.Config.Entrypoint = []string{} + + if s.NoCache.ContainerID, err = b.client.CreateContainer(s); err != nil { + return s, err + } + + if err = b.client.RunContainer(s.NoCache.ContainerID, false); err != nil { + b.client.RemoveContainer(s.NoCache.ContainerID) + return s, err + } + + // Restore command after commit + s.Config.Cmd = origCmd + s.Config.Entrypoint = origEntrypoint + + return s, nil +} + +// CommandAttach implements ATTACH +type CommandAttach struct { + cfg ConfigCommand +} + +// String returns the human readable string representation of the command +func (c *CommandAttach) String() string { + return c.cfg.original +} + +// ShouldRun returns true if the command should be executed +func (c *CommandAttach) ShouldRun(b *Build) (bool, error) { + // TODO: skip attach? + return true, nil +} + +// Execute runs the command +func (c *CommandAttach) Execute(b *Build) (s State, err error) { + s = b.state + + // simply ignore this command if we don't wanna attach + if !b.cfg.Attach { + log.Infof("Skip ATTACH; use --attach option to get inside") + // s.SkipCommit() + return s, nil + } + + if s.ImageID == "" && !s.NoBaseImage { + return s, fmt.Errorf("Please provide a source image with `FROM` prior to ATTACH") + } + + cmd := handleJSONArgs(c.cfg.args, c.cfg.attrs) + + if len(cmd) == 0 { + cmd = []string{"/bin/sh"} + } else if !c.cfg.attrs["json"] { + cmd = append([]string{"/bin/sh", "-c"}, cmd...) + } + + // TODO: do s.commit unique + + // We run this command in the container using CMD + + // Backup the config so we can restore it later + origState := s + defer func() { + s = origState + }() + + s.Config.Cmd = cmd + s.Config.Entrypoint = []string{} + s.Config.Tty = true + s.Config.OpenStdin = true + s.Config.StdinOnce = true + s.Config.AttachStdin = true + s.Config.AttachStderr = true + s.Config.AttachStdout = true + + if s.NoCache.ContainerID, err = b.client.CreateContainer(s); err != nil { + return s, err + } + + if err = b.client.RunContainer(s.NoCache.ContainerID, true); err != nil { + b.client.RemoveContainer(s.NoCache.ContainerID) + return s, err + } + + return s, nil +} + +// CommandEnv implements ENV +type CommandEnv struct { + cfg ConfigCommand } -// cmdExport implements EXPORT command -// TODO: document behavior of cmdExport -func (builder *Builder) cmdExport(args []string, attributes map[string]bool, flags map[string]string, original string) error { +// String returns the human readable string representation of the command +func (c *CommandEnv) String() string { + return c.cfg.original +} + +// ShouldRun returns true if the command should be executed +func (c *CommandEnv) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +// ReplaceEnv implements EnvReplacableCommand interface +func (c *CommandEnv) ReplaceEnv(env []string) error { + return replaceEnv(c.cfg.args, env) +} + +// Execute runs the command +func (c *CommandEnv) Execute(b *Build) (s State, err error) { + + s = b.state + args := c.cfg.args + if len(args) == 0 { - return fmt.Errorf("Command is missing value: %s", original) + return s, fmt.Errorf("ENV requires at least one argument") } - // If only one argument was given to EXPORT, use basename of a file - // EXPORT /my/dir/file.tar --> 
/EXPORT_VOLUME/file.tar - if len(args) < 2 { - args = []string{args[0], "/"} + + if len(args)%2 != 0 { + // should never get here, but just in case + return s, fmt.Errorf("Bad input to ENV, too many args") } - dest := args[len(args)-1] // last one is always the dest + commitStr := "ENV" + + for j := 0; j < len(args); j += 2 { + // name ==> args[j] + // value ==> args[j+1] + newVar := strings.Join(args[j:j+2], "=") + commitStr += " " + newVar + + gotOne := false + for i, envVar := range s.Config.Env { + envParts := strings.SplitN(envVar, "=", 2) + if envParts[0] == args[j] { + s.Config.Env[i] = newVar + gotOne = true + break + } + } + if !gotOne { + s.Config.Env = append(s.Config.Env, newVar) + } + } - // EXPORT /my/dir my_dir --> /EXPORT_VOLUME/my_dir - // EXPORT /my/dir /my_dir --> /EXPORT_VOLUME/my_dir - // EXPORT /my/dir stuff/ --> /EXPORT_VOLUME/stuff/my_dir - // EXPORT /my/dir /stuff/ --> /EXPORT_VOLUME/stuff/my_dir - // EXPORT /my/dir/* / --> /EXPORT_VOLUME/stuff/my_dir + s.Commit(commitStr) - exportsContainerID, err := builder.makeExportsContainer() - if err != nil { - return err + return s, nil +} + +// CommandLabel implements LABEL +type CommandLabel struct { + cfg ConfigCommand +} + +// String returns the human readable string representation of the command +func (c *CommandLabel) String() string { + return c.cfg.original +} + +// ShouldRun returns true if the command should be executed +func (c *CommandLabel) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +// ReplaceEnv implements EnvReplacableCommand interface +func (c *CommandLabel) ReplaceEnv(env []string) error { + return replaceEnv(c.cfg.args, env) +} + +// Execute runs the command +func (c *CommandLabel) Execute(b *Build) (s State, err error) { + + s = b.state + args := c.cfg.args + + if len(args) == 0 { + return s, fmt.Errorf("LABEL requires at least one argument") } - // prepare builder mount - builder.addMount(builderMount{ - dest: exportsVolume, - containerID: exportsContainerID, - }) - defer builder.removeLastMount() + if len(args)%2 != 0 { + // should never get here, but just in case + return s, fmt.Errorf("Bad input to LABEL, too many args") + } - cmdDestPath, err := util.ResolvePath(exportsVolume, dest) - if err != nil { - return fmt.Errorf("Invalid EXPORT destination: %s", dest) + commitStr := "LABEL" + + if s.Config.Labels == nil { + s.Config.Labels = map[string]string{} } - // TODO: rsync doesn't work as expected if ENTRYPOINT is inherited by parent image - // STILL RELEVANT? + for j := 0; j < len(args); j++ { + // name ==> args[j] + // value ==> args[j+1] + newVar := args[j] + "=" + args[j+1] + "" + commitStr += " " + newVar - // build the command - cmd := []string{"/opt/rsync/bin/rsync", "-a", "--delete-during"} - cmd = append(cmd, args[0:len(args)-1]...) 
- cmd = append(cmd, cmdDestPath) + s.Config.Labels[args[j]] = args[j+1] + j++ + } + + s.Commit(commitStr) + + return s, nil +} + +// CommandWorkdir implements WORKDIR +type CommandWorkdir struct { + cfg ConfigCommand +} + +// String returns the human readable string representation of the command +func (c *CommandWorkdir) String() string { + return c.cfg.original +} + +// ShouldRun returns true if the command should be executed +func (c *CommandWorkdir) ShouldRun(b *Build) (bool, error) { + return true, nil +} - // For caching - builder.addLabels(map[string]string{ - "rocker-exportsContainerId": exportsContainerID, - }) +// ReplaceEnv implements EnvReplacableCommand interface +func (c *CommandWorkdir) ReplaceEnv(env []string) error { + return replaceEnv(c.cfg.args, env) +} - // Configure container temporarily, only for this execution - resetFunc := builder.temporaryConfig(func() { - builder.Config.Entrypoint = []string{} - }) - defer resetFunc() +// Execute runs the command +func (c *CommandWorkdir) Execute(b *Build) (s State, err error) { - fmt.Fprintf(builder.OutStream, "[Rocker] run: %s\n", strings.Join(cmd, " ")) + s = b.state - if err := builder.runAndCommit(cmd, "import"); err != nil { - return err + if len(c.cfg.args) != 1 { + return s, fmt.Errorf("WORKDIR requires exactly one argument") } - builder.lastExportImageID = builder.imageID + workdir := c.cfg.args[0] - return nil + if !filepath.IsAbs(workdir) { + current := s.Config.WorkingDir + workdir = filepath.Join("/", current, workdir) + } + + s.Config.WorkingDir = workdir + + s.Commit(fmt.Sprintf("WORKDIR %v", workdir)) + + return s, nil } -// cmdImport implements IMPORT command -// TODO: document behavior of cmdImport -func (builder *Builder) cmdImport(args []string, attributes map[string]bool, flags map[string]string, original string) (err error) { - if len(args) == 0 { - return fmt.Errorf("Command is missing value: %s", original) +// CommandCmd implements CMD +type CommandCmd struct { + cfg ConfigCommand +} + +// String returns the human readable string representation of the command +func (c *CommandCmd) String() string { + return c.cfg.original +} + +// ShouldRun returns true if the command should be executed +func (c *CommandCmd) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +// Execute runs the command +func (c *CommandCmd) Execute(b *Build) (s State, err error) { + s = b.state + + cmd := handleJSONArgs(c.cfg.args, c.cfg.attrs) + + if !c.cfg.attrs["json"] { + cmd = append([]string{"/bin/sh", "-c"}, cmd...) 
} - if builder.lastExportImageID == "" { - return fmt.Errorf("You have to EXPORT something first in order to: %s", original) + + s.Config.Cmd = cmd + + s.Commit(fmt.Sprintf("CMD %q", cmd)) + + if len(c.cfg.args) != 0 { + s.NoCache.CmdSet = true } - if builder.exportsContainerID == "" { - return fmt.Errorf("Something went wrong, missing exports container: %s", original) + + return s, nil +} + +// CommandEntrypoint implements ENTRYPOINT +type CommandEntrypoint struct { + cfg ConfigCommand +} + +// String returns the human readable string representation of the command +func (c *CommandEntrypoint) String() string { + return c.cfg.original +} + +// ShouldRun returns true if the command should be executed +func (c *CommandEntrypoint) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +// Execute runs the command +func (c *CommandEntrypoint) Execute(b *Build) (s State, err error) { + s = b.state + + parsed := handleJSONArgs(c.cfg.args, c.cfg.attrs) + + switch { + case c.cfg.attrs["json"]: + // ENTRYPOINT ["echo", "hi"] + s.Config.Entrypoint = parsed + case len(parsed) == 0: + // ENTRYPOINT [] + s.Config.Entrypoint = []string{} + default: + // ENTRYPOINT echo hi + s.Config.Entrypoint = []string{"/bin/sh", "-c", parsed[0]} } - // If only one argument was given to IMPORT, use the same path for destination - // IMPORT /my/dir/file.tar --> ADD ./EXPORT_VOLUME/my/dir/file.tar /my/dir/file.tar - if len(args) < 2 { - args = []string{args[0], "/"} + + s.Commit(fmt.Sprintf("ENTRYPOINT %q", s.Config.Entrypoint)) + + // TODO: test this + // when setting the entrypoint if a CMD was not explicitly set then + // set the command to nil + if !s.NoCache.CmdSet { + s.Config.Cmd = nil } - dest := args[len(args)-1] // last one is always the dest - // prepare builder mount - builder.addMount(builderMount{ - dest: exportsVolume, - containerID: builder.exportsContainerID, - }) - defer builder.removeLastMount() + return s, nil +} + +// CommandExpose implements EXPOSE +type CommandExpose struct { + cfg ConfigCommand +} - // TODO: rsync doesn't work as expected if ENTRYPOINT is inherited by parent image - // STILL RELEVANT? +// String returns the human readable string representation of the command +func (c *CommandExpose) String() string { + return c.cfg.original +} - cmd := []string{"/opt/rsync/bin/rsync", "-a"} - for _, arg := range args[0 : len(args)-1] { - argResolved, err := util.ResolvePath(exportsVolume, arg) - if err != nil { - return fmt.Errorf("Invalid IMPORT source: %s", arg) +// ShouldRun returns true if the command should be executed +func (c *CommandExpose) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +// ReplaceEnv implements EnvReplacableCommand interface +func (c *CommandExpose) ReplaceEnv(env []string) error { + return replaceEnv(c.cfg.args, env) +} + +// Execute runs the command +func (c *CommandExpose) Execute(b *Build) (s State, err error) { + + s = b.state + + if len(c.cfg.args) == 0 { + return s, fmt.Errorf("EXPOSE requires at least one argument") + } + + if s.Config.ExposedPorts == nil { + s.Config.ExposedPorts = map[docker.Port]struct{}{} + } + + ports, _, err := nat.ParsePortSpecs(c.cfg.args) + if err != nil { + return s, err + } + + // instead of using ports directly, we build a list of ports and sort it so + // the order is consistent. 
This prevents cache burst where map ordering + // changes between builds + portList := make([]string, len(ports)) + var i int + for port := range ports { + dockerPort := docker.Port(port) + if _, exists := s.Config.ExposedPorts[dockerPort]; !exists { + s.Config.ExposedPorts[dockerPort] = struct{}{} } - cmd = append(cmd, argResolved) + portList[i] = string(port) + i++ } - cmd = append(cmd, dest) + sort.Strings(portList) - // For caching - builder.addLabels(map[string]string{ - "rocker-lastExportImageId": builder.lastExportImageID, - }) + message := fmt.Sprintf("EXPOSE %s", strings.Join(portList, " ")) + s.Commit(message) - // Configure container temporarily, only for this execution - resetFunc := builder.temporaryConfig(func() { - builder.Config.Entrypoint = []string{} - }) - defer resetFunc() + return s, nil +} - fmt.Fprintf(builder.OutStream, "[Rocker] run: %s\n", strings.Join(cmd, " ")) +// CommandVolume implements VOLUME +type CommandVolume struct { + cfg ConfigCommand +} - return builder.runAndCommit(cmd, "import") +// String returns the human readable string representation of the command +func (c *CommandVolume) String() string { + return c.cfg.original } -// cmdTag implements TAG command -// TODO: document behavior of cmdTag -func (builder *Builder) cmdTag(args []string, attributes map[string]bool, flags map[string]string, original string) (err error) { - builder.recentTags = []*imagename.ImageName{} - if len(args) == 0 { - return fmt.Errorf("Command is missing value: %s", original) +// ShouldRun returns true if the command should be executed +func (c *CommandVolume) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +// ReplaceEnv implements EnvReplacableCommand interface +func (c *CommandVolume) ReplaceEnv(env []string) error { + return replaceEnv(c.cfg.args, env) +} + +// Execute runs the command +func (c *CommandVolume) Execute(b *Build) (s State, err error) { + + s = b.state + + if len(c.cfg.args) == 0 { + return s, fmt.Errorf("VOLUME requires at least one argument") } - image := imagename.NewFromString(args[0]) - // Save rockerfile to label, sot it can be inspected later - if builder.AddMeta && !builder.metaAdded { - data := &RockerImageData{ - ImageName: image, - Rockerfile: builder.RockerfileContent, - Vars: builder.CliVars, - Properties: template.Vars{}, + if s.Config.Volumes == nil { + s.Config.Volumes = map[string]struct{}{} + } + for _, v := range c.cfg.args { + v = strings.TrimSpace(v) + if v == "" { + return s, fmt.Errorf("Volume specified can not be an empty string") } + s.Config.Volumes[v] = struct{}{} + } - if hostname, _ := os.Hostname(); hostname != "" { - data.Properties["hostname"] = hostname - } - if user, _ := user.Current(); user != nil { - data.Properties["system_login"] = user.Username - data.Properties["system_user"] = user.Name - } + s.Commit(fmt.Sprintf("VOLUME %v", c.cfg.args)) - json, err := json.Marshal(data) - if err != nil { - return fmt.Errorf("Failed to marshal rocker data, error: %s", err) - } + return s, nil +} + +// CommandUser implements USER +type CommandUser struct { + cfg ConfigCommand +} - builder.addLabels(map[string]string{ - "rocker-data": string(json), - }) +// String returns the human readable string representation of the command +func (c *CommandUser) String() string { + return c.cfg.original +} - fmt.Fprintf(builder.OutStream, "[Rocker] add rocker-data label\n") +// ShouldRun returns true if the command should be executed +func (c *CommandUser) ShouldRun(b *Build) (bool, error) { + return true, nil +} - if err := 
builder.commitContainer("", builder.Config.Cmd, "LABEL rocker-data"); err != nil { - return err - } +// ReplaceEnv implements EnvReplacableCommand interface +func (c *CommandUser) ReplaceEnv(env []string) error { + return replaceEnv(c.cfg.args, env) +} + +// Execute runs the command +func (c *CommandUser) Execute(b *Build) (s State, err error) { - builder.metaAdded = true + s = b.state + + if len(c.cfg.args) != 1 { + return s, fmt.Errorf("USER requires exactly one argument") } - doTag := func(tag string) error { - img := &imagename.ImageName{ - Registry: image.Registry, - Name: image.Name, - Tag: tag, - } - builder.recentTags = append(builder.recentTags, img) + s.Config.User = c.cfg.args[0] - fmt.Fprintf(builder.OutStream, "[Rocker] Tag %.12s -> %s\n", builder.imageID, img) + s.Commit(fmt.Sprintf("USER %v", c.cfg.args)) - err := builder.Docker.TagImage(builder.imageID, docker.TagImageOptions{ - Repo: img.NameWithRegistry(), - Tag: img.GetTag(), - Force: true, - }) - if err != nil { - return fmt.Errorf("Failed to set tag %s to image %s", img, builder.imageID) - } - return nil + return s, nil +} + +// CommandOnbuild implements ONBUILD +type CommandOnbuild struct { + cfg ConfigCommand +} + +// String returns the human readable string representation of the command +func (c *CommandOnbuild) String() string { + return c.cfg.original +} + +// ShouldRun returns true if the command should be executed +func (c *CommandOnbuild) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +// Execute runs the command +func (c *CommandOnbuild) Execute(b *Build) (s State, err error) { + + s = b.state + + if len(c.cfg.args) == 0 { + return s, fmt.Errorf("ONBUILD requires at least one argument") } - // By default, tag with current branch name if tag is not specified - // do not use :latest unless it was set explicitly - if !image.HasTag() { - if builder.Vars.IsSet("branch") && builder.Vars["branch"].(string) != "" { - image.Tag = builder.Vars["branch"].(string) - } - // Additionally, tag image with current git sha - if builder.Vars.IsSet("commit") && builder.Vars["commit"] != "" { - if err := doTag(fmt.Sprintf("%.7s", builder.Vars["commit"])); err != nil { - return err - } - } + command := strings.ToUpper(strings.TrimSpace(c.cfg.args[0])) + switch command { + case "ONBUILD": + return s, fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return s, fmt.Errorf("%s isn't allowed as an ONBUILD trigger", command) } - // Do the asked tag - if err := doTag(image.GetTag()); err != nil { - return err + orig := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(c.cfg.original, "") + + s.Config.OnBuild = append(s.Config.OnBuild, orig) + s.Commit(fmt.Sprintf("ONBUILD %s", orig)) + + return s, nil +} + +// CommandTag implements TAG +type CommandTag struct { + cfg ConfigCommand +} + +// String returns the human readable string representation of the command +func (c *CommandTag) String() string { + return c.cfg.original +} + +// ShouldRun returns true if the command should be executed +func (c *CommandTag) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +// Execute runs the command +func (c *CommandTag) Execute(b *Build) (State, error) { + if len(c.cfg.args) != 1 { + return b.state, fmt.Errorf("TAG requires exactly one argument") + } + + if b.state.ImageID == "" { + return b.state, fmt.Errorf("Cannot TAG on empty image") + } + + if err := b.client.TagImage(b.state.ImageID, c.cfg.args[0]); err != nil { + return b.state, err + } + + return b.state, nil +} + 
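TAG above and PUSH below are still ordinary commands from the plan's point of view: every instruction in this file satisfies the same three-method contract — `String` for display, `ShouldRun` for plan filtering, and `Execute` for the actual work against the build state. A minimal sketch of what a new instruction would look like under that contract; `CommandDump` and its behavior are invented here purely for illustration and are not part of this changeset:

```go
package build

import log "github.com/Sirupsen/logrus"

// CommandDump is a hypothetical command used only to illustrate the
// Command contract; it prints the pending commit messages and leaves
// the build state untouched.
type CommandDump struct {
	cfg ConfigCommand
}

// String returns the human readable string representation of the command
func (c *CommandDump) String() string {
	return c.cfg.original
}

// ShouldRun returns true if the command should be executed
func (c *CommandDump) ShouldRun(b *Build) (bool, error) {
	return true, nil
}

// Execute runs the command; it does not mutate s.Config, so it has no
// effect on cache lookup
func (c *CommandDump) Execute(b *Build) (State, error) {
	log.Infof("| pending commits: %s", b.state.GetCommits())
	return b.state, nil
}
```

Commands that do change the config (ENV, LABEL, EXPOSE above) follow the mutation with `s.Commit(...)`, which is what makes the change visible to `probeCache`.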
+// CommandPush implements PUSH +type CommandPush struct { + cfg ConfigCommand +} + +// String returns the human readable string representation of the command +func (c *CommandPush) String() string { + return c.cfg.original +} + +// ShouldRun returns true if the command should be executed +func (c *CommandPush) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +// Execute runs the command +func (c *CommandPush) Execute(b *Build) (State, error) { + if len(c.cfg.args) != 1 { + return b.state, fmt.Errorf("PUSH requires exactly one argument") + } + + if b.state.ImageID == "" { + return b.state, fmt.Errorf("Cannot PUSH empty image") + } + + if err := b.client.TagImage(b.state.ImageID, c.cfg.args[0]); err != nil { + return b.state, err + } + + image := imagename.NewFromString(c.cfg.args[0]) + artifact := imagename.Artifact{ + Name: image, + Pushed: b.cfg.Push, + Tag: image.GetTag(), + ImageID: b.state.ImageID, + BuildTime: time.Now(), } - // Optionally make a semver aliases - if _, ok := flags["semver"]; ok && image.HasTag() { - ver, err := NewSemver(image.GetTag()) + // push image and add some lines to artifacts + if b.cfg.Push { + digest, err := b.client.PushImage(image.String()) if err != nil { - return fmt.Errorf("--semver flag expects tag to be in semver format, error: %s", err) + return b.state, err } - // If the version is like 1.2.3-build512 we also want to alias 1.2.3 - if ver.HasSuffix() { - if err := doTag(fmt.Sprintf("%d.%d.%d", ver.Major, ver.Minor, ver.Patch)); err != nil { - return err - } + artifact.Digest = digest + artifact.Addressable = fmt.Sprintf("%s@%s", image.NameWithRegistry(), digest) + } else { + log.Infof("| Don't push. Pass --push flag to actually push to the registry") + } + + // Publish artifact files + if b.cfg.ArtifactsPath != "" { + if err := os.MkdirAll(b.cfg.ArtifactsPath, 0755); err != nil { + return b.state, fmt.Errorf("Failed to create directory %s for the artifacts, error: %s", b.cfg.ArtifactsPath, err) } - if err := doTag(fmt.Sprintf("%d.%d.x", ver.Major, ver.Minor)); err != nil { - return err + + filePath := filepath.Join(b.cfg.ArtifactsPath, artifact.GetFileName()) + + artifacts := imagename.Artifacts{ + []imagename.Artifact{artifact}, } - if err := doTag(fmt.Sprintf("%d.x", ver.Major)); err != nil { - return err + content, err := yaml.Marshal(artifacts) + if err != nil { + return b.state, err + } + + if err := ioutil.WriteFile(filePath, content, 0644); err != nil { + return b.state, fmt.Errorf("Failed to write artifact file %s, error: %s", filePath, err) } + + log.Infof("| Saved artifact file %s", filePath) + log.Debugf("Artifact properties: %# v", pretty.Formatter(artifact)) } - return nil + return b.state, nil +} + +// CommandCopy implements COPY +type CommandCopy struct { + cfg ConfigCommand } -// cmdPush implements PUSH command -// TODO: document behavior of cmdPush -func (builder *Builder) cmdPush(args []string, attributes map[string]bool, flags map[string]string, original string) (err error) { - if err := builder.cmdTag(args, attributes, flags, original); err != nil { - return fmt.Errorf("Failed to tag image, error: %s", err) +// String returns the human readable string representation of the command +func (c *CommandCopy) String() string { + return c.cfg.original +} + +// ShouldRun returns true if the command should be executed +func (c *CommandCopy) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +// ReplaceEnv implements EnvReplacableCommand interface +func (c *CommandCopy) ReplaceEnv(env []string) error { + return 
replaceEnv(c.cfg.args, env) +} + +// Execute runs the command +func (c *CommandCopy) Execute(b *Build) (State, error) { + if len(c.cfg.args) < 2 { + return b.state, fmt.Errorf("COPY requires at least two arguments") } + return copyFiles(b, c.cfg.args, "COPY") +} - if !builder.Push { - fmt.Fprintf(builder.OutStream, "[Rocker] *** just tagged; pass --push flag to actually push to a registry\n") - return nil +// CommandAdd implements ADD +// For now it is an alias of COPY, but later will add urls and archives to it +type CommandAdd struct { + cfg ConfigCommand +} + +// String returns the human readable string representation of the command +func (c *CommandAdd) String() string { + return c.cfg.original +} + +// ShouldRun returns true if the command should be executed +func (c *CommandAdd) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +// ReplaceEnv implements EnvReplacableCommand interface +func (c *CommandAdd) ReplaceEnv(env []string) error { + return replaceEnv(c.cfg.args, env) +} + +// Execute runs the command +func (c *CommandAdd) Execute(b *Build) (State, error) { + if len(c.cfg.args) < 2 { + return b.state, fmt.Errorf("ADD requires at least two arguments") } + return copyFiles(b, c.cfg.args, "ADD") +} - for _, image := range builder.recentTags { - fmt.Fprintf(builder.OutStream, "[Rocker] Push %.12s -> %s\n", builder.imageID, image) +// CommandMount implements MOUNT +type CommandMount struct { + cfg ConfigCommand +} - digest, err := builder.pushImage(*image) - if err != nil { - return err - } +// String returns the human readable string representation of the command +func (c *CommandMount) String() string { + return c.cfg.original +} - if builder.ArtifactsPath != "" { - if err := os.MkdirAll(builder.ArtifactsPath, 0755); err != nil { - return fmt.Errorf("Failed to create directory %s for the artifacts, error: %s", builder.ArtifactsPath, err) +// ShouldRun returns true if the command should be executed +func (c *CommandMount) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +// Execute runs the command +func (c *CommandMount) Execute(b *Build) (s State, err error) { + + s = b.state + + if len(c.cfg.args) == 0 { + return b.state, fmt.Errorf("MOUNT requires at least one argument") + } + + commitIds := []string{} + + for _, arg := range c.cfg.args { + + switch strings.Contains(arg, ":") { + // MOUNT src:dest + case true: + var ( + pair = strings.SplitN(arg, ":", 2) + src = pair[0] + dest = pair[1] + err error + ) + + // Process relative paths in volumes + if strings.HasPrefix(src, "~") { + src = strings.Replace(src, "~", os.Getenv("HOME"), 1) + } + if !path.IsAbs(src) { + src = path.Join(b.cfg.ContextDir, src) + } + + if src, err = b.client.ResolveHostPath(src); err != nil { + return s, err + } + + if s.NoCache.HostConfig.Binds == nil { + s.NoCache.HostConfig.Binds = []string{} } - filePath := filepath.Join(builder.ArtifactsPath, image.GetTag()) - lines := []string{ - fmt.Sprintf("Name: %s", image), - fmt.Sprintf("Tag: %s", image.GetTag()), - fmt.Sprintf("ImageID: %s", builder.imageID), - fmt.Sprintf("Digest: %s", digest), - fmt.Sprintf("Addressable: %s@%s", image.NameWithRegistry(), digest), + + s.NoCache.HostConfig.Binds = append(s.NoCache.HostConfig.Binds, src+":"+dest) + commitIds = append(commitIds, arg) + + // MOUNT dir + case false: + name, err := b.getVolumeContainer(arg) + if err != nil { + return s, err } - content := []byte(strings.Join(lines, "\n") + "\n") - if err := ioutil.WriteFile(filePath, content, 0644); err != nil { - return fmt.Errorf("Failed to 
write artifact file %s, error: %s", filePath, err) + if s.NoCache.HostConfig.VolumesFrom == nil { + s.NoCache.HostConfig.VolumesFrom = []string{} } - fmt.Fprintf(builder.OutStream, "[Rocker] Save artifact file %s\n", filePath) + s.NoCache.HostConfig.VolumesFrom = append(s.NoCache.HostConfig.VolumesFrom, name) + commitIds = append(commitIds, name+":"+arg) } } - return nil + s.Commit(fmt.Sprintf("MOUNT %q", commitIds)) + + return s, nil } -// cmdRequire implements REQUIRE command -// TODO: document behavior of cmdRequire -func (builder *Builder) cmdRequire(args []string, attributes map[string]bool, flags map[string]string, original string) (err error) { - if len(args) == 0 { - return fmt.Errorf("Command is missing value: %s", original) - } - for _, requireVar := range args { - if !builder.Vars.IsSet(requireVar) { - return fmt.Errorf("Var $%s is required but not set", requireVar) - } - } - return nil +// CommandExport implements EXPORT +type CommandExport struct { + cfg ConfigCommand } -// cmdVar implements VAR command -// it is deprecated due to templating functionality, see: https://github.com/grammarly/rocker#templating -func (builder *Builder) cmdVar(args []string, attributes map[string]bool, flags map[string]string, original string) (err error) { - if len(args) == 0 { - return fmt.Errorf("Command is missing value: %s", original) - } - for i := 0; i < len(args); i += 2 { - key := args[i] - value := args[i+1] - if !builder.Vars.IsSet(key) { - builder.Vars[key] = value - } - } - return nil +// String returns the human readable string representation of the command +func (c *CommandExport) String() string { + return c.cfg.original } -// cmdInclude implements INCLUDE command -// TODO: document behavior of cmdInclude -func (builder *Builder) cmdInclude(args []string, attributes map[string]bool, flags map[string]string, original string) (err error) { +// ShouldRun returns true if the command should be executed +func (c *CommandExport) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +// Execute runs the command +func (c *CommandExport) Execute(b *Build) (s State, err error) { + + s = b.state + args := c.cfg.args + if len(args) == 0 { - return fmt.Errorf("Command is missing value: %s", original) + return s, fmt.Errorf("EXPORT requires at least one argument") } - module := args[0] - contextDir := filepath.Dir(builder.Rockerfile) - resultPath := filepath.Clean(path.Join(contextDir, module)) + // If only one argument was given to EXPORT, use basename of a file + // EXPORT /my/dir/file.tar --> /EXPORT_VOLUME/file.tar + if len(args) < 2 { + args = []string{args[0], "/"} + } - // TODO: protect against going out of working directory? 
+ src := args[0 : len(args)-1] + dest := args[len(args)-1] // last one is always the dest + + // EXPORT /my/dir my_dir --> /EXPORT_VOLUME/my_dir + // EXPORT /my/dir /my_dir --> /EXPORT_VOLUME/my_dir + // EXPORT /my/dir stuff/ --> /EXPORT_VOLUME/stuff/my_dir + // EXPORT /my/dir /stuff/ --> /EXPORT_VOLUME/stuff/my_dir + // EXPORT /my/dir/* / --> /EXPORT_VOLUME/stuff/my_dir - stat, err := os.Stat(resultPath) + exportsContainerID, err := b.getExportsContainer() if err != nil { - return err - } - if !stat.Mode().IsRegular() { - return fmt.Errorf("Expected included resource to be a regular file: %s (%s)", module, original) + return s, err } - fd, err := os.Open(resultPath) + // build the command + cmdDestPath, err := util.ResolvePath(ExportsPath, dest) if err != nil { - return err + return s, fmt.Errorf("Invalid EXPORT destination: %s", dest) } - defer fd.Close() - includedNode, err := parser.Parse(fd) + s.Commit("EXPORT %q to %.12s:%s", src, exportsContainerID, dest) + + s, hit, err := b.probeCache(s) if err != nil { - return err + return s, err } - - for _, node := range includedNode.Children { - if node.Value == "include" { - return fmt.Errorf("Nesting includes is not allowed: \"%s\" in %s", original, resultPath) - } + if hit { + b.exports = append(b.exports, s.ExportsID) + return s, nil } - // inject included commands info root node at current execution position - after := append(includedNode.Children, builder.rootNode.Children[builder.i+1:]...) - builder.rootNode.Children = append(builder.rootNode.Children[:builder.i], after...) - builder.i-- + // Remember original stuff so we can restore it when we finished + var exportsID string + origState := s - return nil -} + defer func() { + s = origState + s.ExportsID = exportsID + b.exports = append(b.exports, exportsID) + }() -// cmdAttach implements ATTACH command -// TODO: document behavior of cmdAttach -func (builder *Builder) cmdAttach(args []string, attributes map[string]bool, flags map[string]string, original string) (err error) { - // simply ignore this command if we don't wanna attach - if !builder.Attach { - fmt.Fprintf(builder.OutStream, "[Rocker] Skipping ATTACH; use --attach option to get inside\n") - return nil + // Append exports container as a volume + // TODO: test the case when there are imports before + s.NoCache.HostConfig.VolumesFrom = append( + s.NoCache.HostConfig.VolumesFrom, exportsContainerID) + + cmd := []string{"/opt/rsync/bin/rsync", "-a", "--delete-during"} + + if b.cfg.Verbose { + cmd = append(cmd, "--verbose") } - cmd := handleJSONArgs(args, attributes) + cmd = append(cmd, src...) + cmd = append(cmd, cmdDestPath) + + s.Config.Cmd = cmd + s.Config.Entrypoint = []string{} - if len(cmd) > 0 { - if !attributes["json"] { - cmd = append([]string{"/bin/sh", "-c"}, cmd...) 
- } - } else { - cmd = builder.Config.Cmd + if exportsID, err = b.client.CreateContainer(s); err != nil { + return s, err } + defer b.client.RemoveContainer(exportsID) + + log.Infof("| Running in %.12s: %s", exportsID, strings.Join(cmd, " ")) - // Mount exports container if there is one - if builder.exportsContainerID != "" { - builder.addMount(builderMount{ - dest: exportsVolume, - containerID: builder.exportsContainerID, - }) - defer builder.removeLastMount() + if err = b.client.RunContainer(exportsID, false); err != nil { + return s, err } - var name string - if _, ok := flags["name"]; ok { - if flags["name"] == "" { - return fmt.Errorf("flag --name needs a value: %s", original) - } - name = flags["name"] + return s, nil +} + +// CommandImport implements IMPORT +type CommandImport struct { + cfg ConfigCommand +} + +// String returns the human readable string representation of the command +func (c *CommandImport) String() string { + return c.cfg.original +} + +// ShouldRun returns true if the command should be executed +func (c *CommandImport) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +// Execute runs the command +func (c *CommandImport) Execute(b *Build) (s State, err error) { + s = b.state + args := c.cfg.args + + if len(args) == 0 { + return s, fmt.Errorf("IMPORT requires at least one argument") } + if len(b.exports) == 0 { + return s, fmt.Errorf("You have to EXPORT something first in order to IMPORT") + } + + // TODO: EXPORT and IMPORT cache is not invalidated properly in between + // different tracks of the same build. The EXPORT may be cached + // because it was built earlier with the same prerequisites, but the actual + // data in the exports container may be from the latest EXPORT of different + // build. So we need to prefix ~/.rocker_exports dir with some id somehow. 
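+	// One possible shape for that fix (a sketch only, not part of this
+	// change): salt the exports container name with an identifier that is
+	// stable per build plan, e.g. a hash of the Rockerfile content, so two
+	// builds can never read each other's exports
+	// (assuming "crypto/sha256" and "fmt" in the import set):
+	//
+	//	func exportsContainerNameFor(planID string) string {
+	//		sum := sha256.Sum256([]byte(planID))
+	//		return fmt.Sprintf("rocker_exports_%x", sum[:8])
+	//	}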
- if _, ok := flags["hostname"]; ok && flags["hostname"] == "" { - return fmt.Errorf("flag --hostname needs a value: %s", original) + log.Infof("| Import from %s", b.exportsContainerName()) + + // If only one argument was given to IMPORT, use the same path for destination + // IMPORT /my/dir/file.tar --> ADD ./EXPORT_VOLUME/my/dir/file.tar /my/dir/file.tar + if len(args) < 2 { + args = []string{args[0], "/"} } + dest := args[len(args)-1] // last one is always the dest + src := []string{} - // Configure container temporarily, only for this execution - resetFunc := builder.temporaryConfig(func() { - if _, ok := flags["hostname"]; ok { - builder.Config.Hostname = flags["hostname"] + for _, arg := range args[0 : len(args)-1] { + argResolved, err := util.ResolvePath(ExportsPath, arg) + if err != nil { + return s, fmt.Errorf("Invalid IMPORT source: %s", arg) } - builder.Config.Cmd = cmd - builder.Config.Entrypoint = []string{} - builder.Config.Tty = true - builder.Config.OpenStdin = true - builder.Config.StdinOnce = true - builder.Config.AttachStdin = true - builder.Config.AttachStderr = true - builder.Config.AttachStdout = true - }) - defer resetFunc() - - containerID, err := builder.createContainer(name) + src = append(src, argResolved) + } + + sort.Strings(b.exports) + s.Commit("IMPORT %q : %q %s", b.exports, src, dest) + + // Check cache + s, hit, err := b.probeCache(s) if err != nil { - return fmt.Errorf("Failed to create container, error: %s", err) + return s, err + } + if hit { + return s, nil } + + // Remember original stuff so we can restore it when we finished + origState := s + + var importID string + defer func() { - if err2 := builder.removeContainer(containerID); err2 != nil && err == nil { - err = err2 - } + s = origState + s.NoCache.ContainerID = importID }() - if err := builder.runContainerAttachStdin(containerID, true); err != nil { - return fmt.Errorf("Failed to run attached container %s, error: %s", containerID, err) + cmd := []string{"/opt/rsync/bin/rsync", "-a"} + + if b.cfg.Verbose { + cmd = append(cmd, "--verbose") } + cmd = append(cmd, src...) + cmd = append(cmd, dest) + + s.Config.Cmd = cmd + s.Config.Entrypoint = []string{} + + // Append exports container as a volume + // TODO: test the case when there are imports before + s.NoCache.HostConfig.VolumesFrom = append( + s.NoCache.HostConfig.VolumesFrom, b.exportsContainerName()) + + if importID, err = b.client.CreateContainer(s); err != nil { + return s, err + } + + log.Infof("| Running in %.12s: %s", importID, strings.Join(cmd, " ")) + + if err = b.client.RunContainer(importID, false); err != nil { + return s, err + } + + // TODO: if b.exportsCacheBusted and IMPORT cache was invalidated, + // CommitCommand then caches it anyway. 
+ + return s, nil +} + +// CommandOnbuildWrap wraps ONBUILD command +type CommandOnbuildWrap struct { + cmd Command +} + +// String returns the human readable string representation of the command +func (c *CommandOnbuildWrap) String() string { + return "ONBUILD " + c.cmd.String() +} + +// ShouldRun returns true if the command should be executed +func (c *CommandOnbuildWrap) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +// Execute runs the command +func (c *CommandOnbuildWrap) Execute(b *Build) (State, error) { + return c.cmd.Execute(b) +} + +////////// Private stuff ////////// + +func replaceEnv(args []string, env []string) (err error) { + for i, v := range args { + if args[i], err = shellparser.ProcessWord(v, env); err != nil { + return err + } + } return nil } diff --git a/src/rocker/build/commands_test.go b/src/rocker/build/commands_test.go new file mode 100644 index 00000000..3a72f845 --- /dev/null +++ b/src/rocker/build/commands_test.go @@ -0,0 +1,693 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package build + +import ( + "fmt" + "reflect" + "rocker/imagename" + "testing" + + "github.com/kr/pretty" + "github.com/stretchr/testify/mock" + + "github.com/fsouza/go-dockerclient" + "github.com/stretchr/testify/assert" +) + +// =========== Testing FROM =========== + +func TestCommandFrom_Existing(t *testing.T) { + b, c := makeBuild(t, "", Config{}) + cmd := &CommandFrom{ConfigCommand{ + args: []string{"existing"}, + }} + + img := &docker.Image{ + ID: "123", + Config: &docker.Config{ + Hostname: "localhost", + }, + } + + c.On("InspectImage", "existing").Return(img, nil).Once() + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + c.AssertExpectations(t) + assert.Equal(t, "123", state.ImageID) + assert.Equal(t, "localhost", state.Config.Hostname) +} + +func TestCommandFrom_NotExisting(t *testing.T) { + b, c := makeBuild(t, "", Config{}) + cmd := &CommandFrom{ConfigCommand{ + args: []string{"not-existing"}, + }} + + var nilImg *docker.Image + var nilList []*imagename.ImageName + + c.On("InspectImage", "not-existing").Return(nilImg, nil).Once() + c.On("ListImages").Return(nilList, nil).Once() + c.On("ListImageTags", "not-existing:latest").Return(nilList, nil).Once() + + _, err := cmd.Execute(b) + c.AssertExpectations(t) + assert.Equal(t, "FROM error: Image not found: not-existing:latest (also checked in the remote registry)", err.Error()) +} + +// =========== Testing RUN =========== + +func TestCommandRun_Simple(t *testing.T) { + b, c := makeBuild(t, "", Config{}) + cmd := &CommandRun{ConfigCommand{ + args: []string{"whoami"}, + }} + + origCmd := []string{"/bin/program"} + b.state.Config.Cmd = origCmd + b.state.ImageID = "123" + + c.On("CreateContainer", mock.AnythingOfType("State")).Return("456", nil).Run(func(args mock.Arguments) { + arg := args.Get(0).(State) + assert.Equal(t, []string{"/bin/sh", "-c", "whoami"}, arg.Config.Cmd) + }).Once() + + c.On("RunContainer", "456", 
false).Return(nil).Once() + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + c.AssertExpectations(t) + assert.Equal(t, origCmd, b.state.Config.Cmd) + assert.Equal(t, origCmd, state.Config.Cmd) + assert.Equal(t, "123", state.ImageID) + assert.Equal(t, "456", state.NoCache.ContainerID) +} + +// =========== Testing COMMIT =========== + +func TestCommandCommit_Simple(t *testing.T) { + b, c := makeBuild(t, "", Config{}) + cmd := &CommandCommit{} + + resultImage := &docker.Image{ID: "789"} + b.state.ImageID = "123" + b.state.NoCache.ContainerID = "456" + b.state.Commit("a").Commit("b") + + c.On("CommitContainer", mock.AnythingOfType("State"), "a; b").Return(resultImage, nil).Once() + c.On("RemoveContainer", "456").Return(nil).Once() + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + c.AssertExpectations(t) + assert.Equal(t, "a; b", b.state.GetCommits()) + assert.Equal(t, "", state.GetCommits()) + assert.Equal(t, []string(nil), state.Config.Cmd) + assert.Equal(t, "789", state.ImageID) + assert.Equal(t, "", state.NoCache.ContainerID) +} + +func TestCommandCommit_NoContainer(t *testing.T) { + b, c := makeBuild(t, "", Config{}) + cmd := &CommandCommit{} + + resultImage := &docker.Image{ID: "789"} + b.state.ImageID = "123" + b.state.Commit("a").Commit("b") + + c.On("CreateContainer", mock.AnythingOfType("State")).Return("456", nil).Run(func(args mock.Arguments) { + arg := args.Get(0).(State) + assert.Equal(t, []string{"/bin/sh", "-c", "#(nop) a; b"}, arg.Config.Cmd) + }).Once() + + c.On("CommitContainer", mock.AnythingOfType("State"), "a; b").Return(resultImage, nil).Once() + c.On("RemoveContainer", "456").Return(nil).Once() + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + c.AssertExpectations(t) + assert.Equal(t, "a; b", b.state.GetCommits()) + assert.Equal(t, "", state.GetCommits()) + assert.Equal(t, "789", state.ImageID) + assert.Equal(t, "", state.NoCache.ContainerID) +} + +func TestCommandCommit_NoCommitMsgs(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandCommit{} + + _, err := cmd.Execute(b) + assert.Nil(t, err) +} + +// TODO: test skip commit + +// =========== Testing ENV =========== + +func TestCommandEnv_Simple(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandEnv{ConfigCommand{ + args: []string{"type", "web", "env", "prod"}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, "ENV type=web env=prod", state.GetCommits()) + assert.Equal(t, []string{"type=web", "env=prod"}, state.Config.Env) +} + +func TestCommandEnv_Advanced(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandEnv{ConfigCommand{ + args: []string{"type", "web", "env", "prod"}, + }} + + b.state.Config.Env = []string{"env=dev", "version=1.2.3"} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, "ENV type=web env=prod", state.GetCommits()) + assert.Equal(t, []string{"env=prod", "version=1.2.3", "type=web"}, state.Config.Env) +} + +// =========== Testing LABEL =========== + +func TestCommandLabel_Simple(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandLabel{ConfigCommand{ + args: []string{"type", "web", "env", "prod"}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + expectedLabels := map[string]string{ + "type": "web", + "env": "prod", + } + + t.Logf("Result labels: %# v", pretty.Formatter(state.Config.Labels)) + + assert.Equal(t, "LABEL type=web env=prod", 
state.GetCommits()) + assert.True(t, reflect.DeepEqual(state.Config.Labels, expectedLabels), "bad result labels") +} + +func TestCommandLabel_Advanced(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandLabel{ConfigCommand{ + args: []string{"type", "web", "env", "prod"}, + }} + + b.state.Config.Labels = map[string]string{ + "env": "dev", + "version": "1.2.3", + } + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + expectedLabels := map[string]string{ + "type": "web", + "version": "1.2.3", + "env": "prod", + } + + t.Logf("Result labels: %# v", pretty.Formatter(state.Config.Labels)) + + assert.Equal(t, "LABEL type=web env=prod", state.GetCommits()) + assert.True(t, reflect.DeepEqual(state.Config.Labels, expectedLabels), "bad result labels") +} + +// =========== Testing MAINTAINER =========== + +func TestCommandMaintainer_Simple(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandMaintainer{ConfigCommand{ + args: []string{"terminator"}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, "", state.GetCommits()) +} + +// =========== Testing WORKDIR =========== + +func TestCommandWorkdir_Simple(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandWorkdir{ConfigCommand{ + args: []string{"/app"}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, "/app", state.Config.WorkingDir) +} + +func TestCommandWorkdir_Relative_HasRoot(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandWorkdir{ConfigCommand{ + args: []string{"www"}, + }} + + b.state.Config.WorkingDir = "/home" + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, "/home/www", state.Config.WorkingDir) +} + +func TestCommandWorkdir_Relative_NoRoot(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandWorkdir{ConfigCommand{ + args: []string{"www"}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, "/www", state.Config.WorkingDir) +} + +// =========== Testing CMD =========== + +func TestCommandCmd_Simple(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandCmd{ConfigCommand{ + args: []string{"apt-get", "install"}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, []string{"/bin/sh", "-c", "apt-get install"}, state.Config.Cmd) +} + +func TestCommandCmd_Json(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandCmd{ConfigCommand{ + args: []string{"apt-get", "install"}, + attrs: map[string]bool{"json": true}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, []string{"apt-get", "install"}, state.Config.Cmd) +} + +// =========== Testing ENTRYPOINT =========== + +func TestCommandEntrypoint_Simple(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandEntrypoint{ConfigCommand{ + args: []string{"/bin/sh"}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, []string{"/bin/sh", "-c", "/bin/sh"}, state.Config.Entrypoint) +} + +func TestCommandEntrypoint_Json(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandEntrypoint{ConfigCommand{ + args: []string{"/bin/bash", "-c"}, + attrs: map[string]bool{"json": true}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, []string{"/bin/bash", "-c"}, state.Config.Entrypoint) +} + +func 
TestCommandEntrypoint_Remove(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandEntrypoint{ConfigCommand{ + args: []string{}, + }} + + b.state.Config.Entrypoint = []string{"/bin/sh", "-c"} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, []string{}, state.Config.Entrypoint) +} + +// =========== Testing EXPOSE =========== + +func TestCommandExpose_Simple(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandExpose{ConfigCommand{ + args: []string{"80"}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + expectedPorts := map[docker.Port]struct{}{ + docker.Port("80/tcp"): struct{}{}, + } + + assert.True(t, reflect.DeepEqual(expectedPorts, state.Config.ExposedPorts), "bad exposed ports") +} + +func TestCommandExpose_Add(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandExpose{ConfigCommand{ + args: []string{"443"}, + }} + + b.state.Config.ExposedPorts = map[docker.Port]struct{}{ + docker.Port("80/tcp"): struct{}{}, + } + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + expectedPorts := map[docker.Port]struct{}{ + docker.Port("80/tcp"): struct{}{}, + docker.Port("443/tcp"): struct{}{}, + } + + assert.True(t, reflect.DeepEqual(expectedPorts, state.Config.ExposedPorts), "bad exposed ports") +} + +// =========== Testing VOLUME =========== + +func TestCommandVolume_Simple(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandVolume{ConfigCommand{ + args: []string{"/data"}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + volumes := map[string]struct{}{ + "/data": struct{}{}, + } + + assert.True(t, reflect.DeepEqual(volumes, state.Config.Volumes), "bad volumes") +} + +func TestCommandVolume_Add(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandVolume{ConfigCommand{ + args: []string{"/var/log"}, + }} + + b.state.Config.Volumes = map[string]struct{}{ + "/data": struct{}{}, + } + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + volumes := map[string]struct{}{ + "/data": struct{}{}, + "/var/log": struct{}{}, + } + + assert.True(t, reflect.DeepEqual(volumes, state.Config.Volumes), "bad volumes") +} + +// =========== Testing USER =========== + +func TestCommandUser_Simple(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandUser{ConfigCommand{ + args: []string{"www"}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, "www", state.Config.User) +} + +// =========== Testing ONBUILD =========== + +func TestCommandOnBuild_Simple(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandOnbuild{ConfigCommand{ + args: []string{"RUN", "make", "install"}, + original: "ONBUILD RUN make install", + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, []string{"RUN make install"}, state.Config.OnBuild) +} + +// =========== Testing COPY =========== + +func TestCommandCopy_Simple(t *testing.T) { + // TODO: do we need to check the dest is always a directory? 
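+	// What the mocks below pin down: COPY creates a container whose Cmd
+	// carries the "#(nop)" commit message, and the tar stream is uploaded
+	// to "/" because file paths are prefixed inside the archive itself
+	// (see copyFiles in copy.go)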
+ b, c := makeBuild(t, "", Config{}) + cmd := &CommandCopy{ConfigCommand{ + args: []string{"testdata/Rockerfile", "/Rockerfile"}, + }} + + c.On("CreateContainer", mock.AnythingOfType("State")).Return("456", nil).Run(func(args mock.Arguments) { + arg := args.Get(0).(State) + // TODO: a better check + assert.True(t, len(arg.Config.Cmd) > 0) + }).Once() + + c.On("UploadToContainer", "456", mock.AnythingOfType("*io.PipeReader"), "/").Return(nil).Once() + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + t.Logf("state: %# v", pretty.Formatter(state)) + + c.AssertExpectations(t) + assert.Equal(t, "456", state.NoCache.ContainerID) +} + +// =========== Testing TAG =========== + +func TestCommandTag_Simple(t *testing.T) { + b, c := makeBuild(t, "", Config{}) + cmd := &CommandTag{ConfigCommand{ + args: []string{"docker.io/grammarly/rocker:1.0"}, + }} + + b.state.ImageID = "123" + + c.On("TagImage", "123", "docker.io/grammarly/rocker:1.0").Return(nil).Once() + + _, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + c.AssertExpectations(t) +} + +func TestCommandTag_WrongArgsNumber(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandTag{ConfigCommand{ + args: []string{}, + }} + cmd2 := &CommandTag{ConfigCommand{ + args: []string{"1", "2"}, + }} + + b.state.ImageID = "123" + + _, err := cmd.Execute(b) + assert.EqualError(t, err, "TAG requires exactly one argument") + + _, err2 := cmd2.Execute(b) + assert.EqualError(t, err2, "TAG requires exactly one argument") +} + +func TestCommandTag_NoImage(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandTag{ConfigCommand{ + args: []string{"docker.io/grammarly/rocker:1.0"}, + }} + + _, err := cmd.Execute(b) + assert.EqualError(t, err, "Cannot TAG on empty image") +} + +// =========== Testing PUSH =========== + +func TestCommandPush_Simple(t *testing.T) { + b, c := makeBuild(t, "", Config{}) + cmd := &CommandPush{ConfigCommand{ + args: []string{"docker.io/grammarly/rocker:1.0"}, + }} + + b.cfg.Push = true + b.state.ImageID = "123" + + c.On("TagImage", "123", "docker.io/grammarly/rocker:1.0").Return(nil).Once() + c.On("PushImage", "docker.io/grammarly/rocker:1.0").Return("sha256:fafa", nil).Once() + + _, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + c.AssertExpectations(t) +} + +func TestCommandPush_WrongArgsNumber(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandPush{ConfigCommand{ + args: []string{}, + }} + cmd2 := &CommandPush{ConfigCommand{ + args: []string{"1", "2"}, + }} + + b.state.ImageID = "123" + + _, err := cmd.Execute(b) + assert.EqualError(t, err, "PUSH requires exactly one argument") + + _, err2 := cmd2.Execute(b) + assert.EqualError(t, err2, "PUSH requires exactly one argument") +} + +func TestCommandPush_NoImage(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandPush{ConfigCommand{ + args: []string{"docker.io/grammarly/rocker:1.0"}, + }} + + _, err := cmd.Execute(b) + assert.EqualError(t, err, "Cannot PUSH empty image") +} + +// =========== Testing MOUNT =========== + +func TestCommandMount_Simple(t *testing.T) { + b, c := makeBuild(t, "", Config{}) + cmd := &CommandMount{ConfigCommand{ + args: []string{"/src:/dest"}, + }} + + c.On("ResolveHostPath", "/src").Return("/resolved/src", nil).Once() + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + c.AssertExpectations(t) + assert.Equal(t, []string{"/resolved/src:/dest"}, state.NoCache.HostConfig.Binds) + assert.Equal(t, `MOUNT ["/src:/dest"]`, 
state.GetCommits()) +} + +func TestCommandMount_VolumeContainer(t *testing.T) { + b, c := makeBuild(t, "", Config{}) + cmd := &CommandMount{ConfigCommand{ + args: []string{"/cache"}, + }} + + containerName := b.mountsContainerName("/cache") + + c.On("EnsureContainer", containerName, mock.AnythingOfType("*docker.Config"), "/cache").Return("123", nil).Run(func(args mock.Arguments) { + arg := args.Get(1).(*docker.Config) + assert.Equal(t, MountVolumeImage, arg.Image) + expectedVolumes := map[string]struct{}{ + "/cache": struct{}{}, + } + assert.True(t, reflect.DeepEqual(expectedVolumes, arg.Volumes)) + }).Once() + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + commitMsg := fmt.Sprintf("MOUNT [\"%s:/cache\"]", containerName) + + c.AssertExpectations(t) + assert.Equal(t, []string{containerName}, state.NoCache.HostConfig.VolumesFrom) + assert.Equal(t, commitMsg, state.GetCommits()) +} + +// TODO: test Cleanup diff --git a/src/rocker/build/config.go b/src/rocker/build/compare.go similarity index 95% rename from src/rocker/build/config.go rename to src/rocker/build/compare.go index 04d9cf80..1d94046b 100644 --- a/src/rocker/build/config.go +++ b/src/rocker/build/compare.go @@ -20,7 +20,7 @@ import "github.com/fsouza/go-dockerclient" // CompareConfigs compares two Config struct. Does not compare the "Image" nor "Hostname" fields // If OpenStdin is set, then it differs -func CompareConfigs(a, b *docker.Config) bool { +func CompareConfigs(a, b docker.Config) bool { // Experimental: do not consider rocker-data labels when comparing if _, ok := a.Labels["rocker-data"]; ok { tmp := a.Labels["rocker-data"] @@ -33,8 +33,7 @@ func CompareConfigs(a, b *docker.Config) bool { defer func() { b.Labels["rocker-data"] = tmp }() } - if a == nil || b == nil || - a.OpenStdin || b.OpenStdin { + if a.OpenStdin || b.OpenStdin { return false } diff --git a/src/rocker/build/container_formatter.go b/src/rocker/build/container_formatter.go new file mode 100644 index 00000000..b3f79439 --- /dev/null +++ b/src/rocker/build/container_formatter.go @@ -0,0 +1,49 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package build
+
+import (
+	"fmt"
+
+	log "github.com/Sirupsen/logrus"
+)
+
+type formatter struct {
+	containerID string
+	level       log.Level
+	delegate    log.Formatter
+}
+
+// NewContainerFormatter returns an object that is given to logrus to better format
+// container output
+func NewContainerFormatter(containerID string, level log.Level) log.Formatter {
+	return &formatter{
+		containerID: containerID,
+		level:       level,
+		delegate:    log.StandardLogger().Formatter,
+	}
+}
+
+// Format formats a message from container
+func (f *formatter) Format(entry *log.Entry) ([]byte, error) {
+	e := entry.WithFields(log.Fields{
+		"container": fmt.Sprintf("%.12s", f.containerID),
+	})
+	e.Message = entry.Message
+	e.Level = f.level
+	return f.delegate.Format(e)
+}
diff --git a/src/rocker/build/containers.go b/src/rocker/build/containers.go
deleted file mode 100644
index cdb057af..00000000
--- a/src/rocker/build/containers.go
+++ /dev/null
@@ -1,292 +0,0 @@
-/*-
- * Copyright 2015 Grammarly, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package build
-
-import (
-	"fmt"
-	"io"
-	"os"
-	"os/signal"
-
-	"rocker/util"
-
-	"github.com/docker/docker/pkg/term"
-	"github.com/fsouza/go-dockerclient"
-)
-
-func (builder *Builder) runAndCommit(cmd []string, comment string) error {
-	// set Cmd manually, this is special case only for Dockerfiles
-	origCmd := builder.Config.Cmd
-	clearFunc := builder.temporaryCmd(cmd)
-	defer clearFunc()
-
-	hit, err := builder.probeCache()
-	if err != nil {
-		return err
-	}
-	if hit {
-		return nil
-	}
-
-	containerID, err := builder.createContainer("")
-	if err != nil {
-		return fmt.Errorf("Failed to create container, error: %s", err)
-	}
-	defer func() {
-		if err2 := builder.removeContainer(containerID); err2 != nil && err == nil {
-			err = err2
-		}
-	}()
-
-	if err := builder.runContainer(containerID); err != nil {
-		return fmt.Errorf("Failed to run container %s, error: %s", containerID, err)
-	}
-
-	return builder.commitContainer(containerID, origCmd, comment)
-}
-
-func (builder *Builder) createContainer(name string) (string, error) {
-	volumesFrom := builder.getMountContainerIds()
-	binds := builder.getBinds()
-
-	builder.Config.Image = builder.imageID
-
-	opts := docker.CreateContainerOptions{
-		Name:   name,
-		Config: builder.Config,
-		HostConfig: &docker.HostConfig{
-			Binds:       binds,
-			VolumesFrom: volumesFrom,
-		},
-	}
-
-	container, err := builder.Docker.CreateContainer(opts)
-	if err != nil {
-		return "", err
-	}
-
-	fmt.Fprintf(builder.OutStream, "[Rocker] ---> Running in %.12s (image id = %.12s)\n", container.ID, builder.imageID)
-
-	return container.ID, nil
-}
-
-func (builder *Builder) removeContainer(containerID string) error {
-	fmt.Fprintf(builder.OutStream, "[Rocker] Removing intermediate container %.12s\n", containerID)
-	// TODO: always force?
- return builder.Docker.RemoveContainer(docker.RemoveContainerOptions{ID: containerID, Force: true}) -} - -func (builder *Builder) runContainer(containerID string) error { - return builder.runContainerAttachStdin(containerID, false) -} - -func (builder *Builder) runContainerAttachStdin(containerID string, attachStdin bool) error { - success := make(chan struct{}) - - attachOpts := docker.AttachToContainerOptions{ - Container: containerID, - OutputStream: util.PrefixPipe("[Docker] ", builder.OutStream), - ErrorStream: util.PrefixPipe("[Docker] ", builder.OutStream), - Stdout: true, - Stderr: true, - Stream: true, - Success: success, - } - - if attachStdin { - if !builder.isTerminalIn { - return fmt.Errorf("Cannot attach to a container on non tty input") - } - oldState, err := term.SetRawTerminal(builder.fdIn) - if err != nil { - return err - } - defer term.RestoreTerminal(builder.fdIn, oldState) - - attachOpts.InputStream = readerVoidCloser{builder.InStream} - attachOpts.OutputStream = builder.OutStream - attachOpts.ErrorStream = builder.OutStream - attachOpts.Stdin = true - attachOpts.RawTerminal = true - } - - finished := make(chan struct{}, 1) - - go func() { - if err := builder.Docker.AttachToContainer(attachOpts); err != nil { - select { - case <-finished: - // Ignore any attach errors when we have finished already. - // It may happen if we attach stdin, then container exit, but then there is other input from stdin continues. - // This is the case when multiple ATTACH command are used in a single Rockerfile. - // The problem though is that we cannot close stdin, to have it available for the subsequent ATTACH; - // therefore, hijack goroutine from the previous ATTACH will hang until the input received and then - // it will fire an error. - // It's ok for `rocker` since it is not a daemon, but rather a one-off command. - // - // Also, there is still a problem that `rocker` loses second character from the Stdin in a second ATTACH. - // But let's consider it a corner case. - default: - // Print the error. 
We cannot return it because the main routine is handing on WaitContaienr - fmt.Fprintf(builder.OutStream, "Got error while attaching to container %s: %s\n", containerID, err) - } - } - }() - - success <- <-success - - if err := builder.Docker.StartContainer(containerID, &docker.HostConfig{}); err != nil { - return err - } - - if attachStdin { - if err := builder.monitorTtySize(containerID); err != nil { - return fmt.Errorf("Failed to monitor TTY size for container %s, error: %s", containerID, err) - } - } - - sigch := make(chan os.Signal, 1) - signal.Notify(sigch, os.Interrupt) - - errch := make(chan error) - - go func() { - statusCode, err := builder.Docker.WaitContainer(containerID) - if err != nil { - errch <- err - } else if statusCode != 0 { - errch <- fmt.Errorf("Failed to run container, exit with code %d", statusCode) - } - errch <- nil - return - }() - - select { - case err := <-errch: - // indicate 'finished' so the `attach` goroutine will not give any errors - finished <- struct{}{} - if err != nil { - return err - } - case <-sigch: - fmt.Fprintf(builder.OutStream, "[Rocker] Received SIGINT, remove current container...\n") - if err := builder.removeContainer(containerID); err != nil { - fmt.Fprintf(builder.OutStream, "[Rocker] Failed to remove container: %s\n", err) - } - // TODO: send signal to builder.Build() and have a proper cleanup - os.Exit(2) - } - - return nil -} - -func (builder *Builder) commitContainer(containerID string, autoCmd []string, comment string) (err error) { - - if containerID == "" { - clearFunc := builder.temporaryCmd([]string{"/bin/sh", "-c", "#(nop) " + comment}) - defer clearFunc() - - hit, err := builder.probeCache() - if err != nil { - return err - } - if hit { - return nil - } - - containerID, err = builder.createContainer("") - if err != nil { - return err - } - - defer func() { - if err2 := builder.removeContainer(containerID); err2 != nil && err == nil { - err = err2 - } - }() - } - - // clone the struct - autoConfig := *builder.Config - autoConfig.Cmd = autoCmd - - commitOpts := docker.CommitContainerOptions{ - Container: containerID, - Message: "", - Run: &autoConfig, - } - - image, err := builder.Docker.CommitContainer(commitOpts) - if err != nil { - return err - } - - builder.imageID = image.ID - - return nil -} - -func (builder *Builder) ensureContainer(containerName string, config *docker.Config, purpose string) (*docker.Container, error) { - // Check if container exists - container, err := builder.Docker.InspectContainer(containerName) - - // No data volume container for this build, create it - if _, ok := err.(*docker.NoSuchContainer); ok { - - if err := builder.ensureImage(config.Image, purpose); err != nil { - return container, fmt.Errorf("Failed to check image %s, error: %s", config.Image, err) - } - - fmt.Fprintf(builder.OutStream, "[Rocker] Create container: %s for %s\n", containerName, purpose) - - createOpts := docker.CreateContainerOptions{ - Name: containerName, - Config: config, - } - - container, err = builder.Docker.CreateContainer(createOpts) - if err != nil { - return container, fmt.Errorf("Failed to create container %s from image %s, error: %s", containerName, config.Image, err) - } - } else if err == nil { - fmt.Fprintf(builder.OutStream, "[Rocker] Use existing container: %s for %s\n", containerName, purpose) - } - - return container, err -} - -// readerVoidCloser is a hack of the improved go-dockerclient's hijacking behavior -// It simply wraps io.Reader (os.Stdin in our case) and discards any Close() call. 
-//
-// It's important because we don't want to close os.Stdin for two reasons:
-// 1. We need to restore the terminal back from the raw mode after ATTACH
-// 2. There can be other ATTACH instructions for which we need an open stdin
-//
-// See additional notes in the runContainerAttachStdin() function
-type readerVoidCloser struct {
-	reader io.Reader
-}
-
-// Read reads from current reader
-func (r readerVoidCloser) Read(p []byte) (int, error) {
-	return r.reader.Read(p)
-}
-
-// Close is a viod function, does nothing
-func (r readerVoidCloser) Close() error {
-	return nil
-}
diff --git a/src/rocker/build/copy.go b/src/rocker/build/copy.go
new file mode 100644
index 00000000..5c523d4f
--- /dev/null
+++ b/src/rocker/build/copy.go
@@ -0,0 +1,423 @@
+/*-
+ * Copyright 2015 Grammarly, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package build
+
+import (
+	"archive/tar"
+	"bufio"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/pkg/fileutils"
+	"github.com/docker/docker/pkg/tarsum"
+	"github.com/docker/docker/pkg/units"
+	"github.com/kr/pretty"
+
+	log "github.com/Sirupsen/logrus"
+)
+
+const buffer32K = 32 * 1024
+
+type upload struct {
+	tar   io.ReadCloser
+	size  int64
+	src   string
+	files []*uploadFile
+	dest  string
+}
+
+type uploadFile struct {
+	src     string
+	dest    string
+	relDest string
+	size    int64
+}
+
+func copyFiles(b *Build, args []string, cmdName string) (s State, err error) {
+
+	s = b.state
+
+	if len(args) < 2 {
+		return s, fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
+	}
+
+	var (
+		tarSum   tarsum.TarSum
+		src      = args[0 : len(args)-1]
+		dest     = filepath.FromSlash(args[len(args)-1]) // last one is always the dest
+		u        *upload
+		excludes = s.NoCache.Dockerignore
+	)
+
+	// If the destination does not end with a slash, it is not a directory
+	// and therefore cannot take more than one source
+	hasTrailingSlash := strings.HasSuffix(dest, string(os.PathSeparator))
+	if !hasTrailingSlash && len(src) > 1 {
+		return s, fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
+	}
+
+	if !filepath.IsAbs(dest) {
+		dest = filepath.Join(s.Config.WorkingDir, dest)
+		// Add the trailing slash back if we had it before
+		if hasTrailingSlash {
+			dest += string(os.PathSeparator)
+		}
+	}
+
+	if u, err = makeTarStream(b.cfg.ContextDir, dest, cmdName, src, excludes); err != nil {
+		return s, err
+	}
+
+	// skip COPY if no files matched
+	if len(u.files) == 0 {
+		log.Infof("| No files matched")
+		return s, nil
+	}
+
+	log.Infof("| Calculating tarsum for %d files (%s total)", len(u.files), units.HumanSize(float64(u.size)))
+
+	if tarSum, err = tarsum.NewTarSum(u.tar, true, tarsum.Version1); err != nil {
+		return s, err
+	}
+	if _, err = io.Copy(ioutil.Discard, tarSum); err != nil {
+		return s, err
+	}
+	u.tar.Close()
+
+	// TODO: useful commit comment?
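+	// The tarsum digest computed above becomes part of the commit message
+	// below, and the commit message is embedded into the container config
+	// that the cache probe compares; this is how a change in the content of
+	// any matched file invalidates the cache for this COPY/ADD step.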
+
+	message := fmt.Sprintf("%s %s to %s", cmdName, tarSum.Sum(nil), dest)
+	s.Commit(message)
+
+	// Check cache
+	s, hit, err := b.probeCache(s)
+	if err != nil {
+		return s, err
+	}
+	if hit {
+		return s, nil
+	}
+
+	origCmd := s.Config.Cmd
+	s.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + message}
+
+	if s.NoCache.ContainerID, err = b.client.CreateContainer(s); err != nil {
+		return s, err
+	}
+
+	s.Config.Cmd = origCmd
+
+	// We need to make a new tar stream, because the previous one has been
+	// read by the tarsum; maybe optimize this in the future
+	if u, err = makeTarStream(b.cfg.ContextDir, dest, cmdName, src, excludes); err != nil {
+		return s, err
+	}
+
+	// Copy to "/" because we made the prefix inside the tar archive
+	// Do that because we are not able to reliably create directories inside the container
+	if err = b.client.UploadToContainer(s.NoCache.ContainerID, u.tar, "/"); err != nil {
+		return s, err
+	}
+
+	return s, nil
+}
+
+func makeTarStream(srcPath, dest, cmdName string, includes, excludes []string) (u *upload, err error) {
+
+	u = &upload{
+		src:  srcPath,
+		dest: dest,
+	}
+
+	if u.files, err = listFiles(srcPath, includes, excludes); err != nil {
+		return u, err
+	}
+
+	// Calculate total size
+	for _, f := range u.files {
+		u.size += f.size
+	}
+
+	sep := string(os.PathSeparator)
+
+	if len(u.files) == 0 {
+		return u, nil
+	}
+
+	// If we transfer a single item
+	if len(includes) == 1 {
+		var (
+			item             = filepath.Clean(includes[0])
+			itemPath         = filepath.Join(srcPath, item)
+			hasTrailingSlash = strings.HasSuffix(u.dest, sep)
+			hasWildcards     = containsWildcards(item)
+			itemIsDir        = false
+			addSep           = false
+			stripDir         = false
+		)
+
+		if stat, err := os.Stat(itemPath); err == nil && stat.IsDir() {
+			itemIsDir = true
+		}
+
+		// The destination does not end with a slash; treat it as a directory
+		// and add the separator back at the end (the single-file rename case
+		// below may cancel this)
+		if !hasTrailingSlash {
+			addSep = true
+		}
+
+		// If the item copied is a directory, we have to strip its name
+		// e.g. COPY asd[/1,2] /lib --> /lib[/1,2] but not /lib/asd[/1,2]
+		if itemIsDir {
+			stripDir = true
+		} else if !hasWildcards && !hasTrailingSlash {
+			// If we've got a single file that was explicitly pointed to in the source item,
+			// we need to replace its name with the destination
+			// e.g. COPY src/foo.txt /app/bar.txt
+			u.files[0].dest = strings.TrimLeft(u.dest, sep)
+			u.dest = ""
+			addSep = false
+		}
+
+		if stripDir {
+			for i := range u.files {
+				relDest, err := filepath.Rel(item, u.files[i].dest)
+				if err != nil {
+					return u, err
+				}
+				u.files[i].dest = relDest
+			}
+		}
+
+		if addSep {
+			u.dest += sep
+		}
+	}
+
+	// Cut the slash prefix from the dest, because it will be the root of the tar;
+	// the archive will always be uploaded to the root of a container
+	if strings.HasPrefix(u.dest, sep) {
+		u.dest = u.dest[1:]
+	}
+
+	log.Debugf("Making archive prefix=%s %# v", u.dest, pretty.Formatter(u))
+
+	pipeReader, pipeWriter := io.Pipe()
+	u.tar = pipeReader
+
+	go func() {
+		ta := &tarAppender{
+			TarWriter: tar.NewWriter(pipeWriter),
+			Buffer:    bufio.NewWriterSize(nil, buffer32K),
+			SeenFiles: make(map[uint64]string),
+		}
+
+		defer func() {
+			if err := ta.TarWriter.Close(); err != nil {
+				log.Errorf("Failed to close tar writer, error: %s", err)
+			}
+			if err := pipeWriter.Close(); err != nil {
+				log.Errorf("Failed to close pipe writer, error: %s", err)
+			}
+		}()
+
+		// write files to tar
+		for _, f := range u.files {
+			ta.addTarFile(f.src, u.dest+f.dest)
+		}
+	}()
+
+	return u, nil
+}
+
+func listFiles(srcPath string, includes, excludes []string) ([]*uploadFile, error) {
+
+	result := []*uploadFile{}
+	seen := map[string]struct{}{}
+
+	// TODO: support URLs
+	// TODO: support local archives (and maybe remote archives as well)
+
+	excludes, patDirs, exceptions, err := fileutils.CleanPatterns(excludes)
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO: here we remove some exclude patterns, how about patDirs?
+	excludes, nestedPatterns := findNestedPatterns(excludes)
+
+	for _, pattern := range includes {
+
+		matches, err := filepath.Glob(filepath.Join(srcPath, pattern))
+		if err != nil {
+			return result, err
+		}
+
+		for _, match := range matches {
+
+			// We need to check if the current match is a dir
+			// to prefix files inside with it
+			matchInfo, err := os.Stat(match)
+			if err != nil {
+				return result, err
+			}
+
+			// Walk through each match since it may be a directory
+			err = filepath.Walk(match, func(path string, info os.FileInfo, err error) error {
+
+				relFilePath, err := filepath.Rel(srcPath, path)
+				if err != nil {
+					return err
+				}
+
+				// TODO: ensure ignoring works correctly, maybe improve .dockerignore to work more like .gitignore?
+
+				skip := false
+				skipNested := false
+
+				// Here we want to keep files that are specified explicitly in the includes,
+				// no matter what. For example, .dockerignore can have some wildcard items
+				// specified, but in COPY we want to explicitly add a file that would
+				// otherwise be ignored by a wildcard or a directory COPY
+				if pattern != relFilePath {
+					if skip, err = fileutils.OptimizedMatches(relFilePath, excludes, patDirs); err != nil {
+						return err
+					}
+					if skipNested, err = matchNested(relFilePath, nestedPatterns); err != nil {
+						return err
+					}
+				}
+
+				if skip || skipNested {
+					if !exceptions && info.IsDir() {
+						return filepath.SkipDir
+					}
+					return nil
+				}
+
+				// TODO: read links?
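+				// From here on we decide whether the walked entry makes it
+				// into the tar: skip entries that cannot be uploaded,
+				// deduplicate, and compute the file's path inside the archive.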
+
+				// not interested in dirs, since we walk them already
+				if info.IsDir() {
+					return nil
+				}
+
+				// skip symlinks, so we don't have to check whether they point to non-existing files;
+				// also skip named pipes, because they hang on open
+				if info.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
+					return nil
+				}
+
+				if _, ok := seen[relFilePath]; ok {
+					return nil
+				}
+				seen[relFilePath] = struct{}{}
+
+				// cut the wildcard part of the path, or use the base name
+
+				var (
+					resultFilePath string
+					baseChunks     = splitPath(pattern)
+					destChunks     = splitPath(relFilePath)
+					lastChunk      = baseChunks[len(baseChunks)-1]
+				)
+
+				if containsWildcards(lastChunk) {
+					// In case of a `foo/bar/*` source path, we need to make the
+					// destination files without the `foo/bar/` prefix
+					resultFilePath = filepath.Join(destChunks[len(baseChunks)-1:]...)
+				} else if matchInfo.IsDir() {
+					// If the source is a directory, keep it as is
+					resultFilePath = relFilePath
+				} else {
+					// The source has referred to a file
+					resultFilePath = filepath.Base(relFilePath)
+				}
+
+				result = append(result, &uploadFile{
+					src:     path,
+					dest:    resultFilePath,
+					relDest: relFilePath,
+					size:    info.Size(),
+				})
+
+				return nil
+			})
+
+			if err != nil {
+				return result, err
+			}
+		}
+	}
+
+	return result, nil
+}
+
+func containsWildcards(name string) bool {
+	for i := 0; i < len(name); i++ {
+		ch := name[i]
+		if ch == '\\' {
+			i++
+		} else if ch == '*' || ch == '?' || ch == '[' {
+			return true
+		}
+	}
+	return false
+}
+
+func splitPath(path string) []string {
+	return strings.Split(path, string(os.PathSeparator))
+}
+
+type nestedPattern struct {
+	prefix  string
+	pattern string
+}
+
+func (p nestedPattern) Match(path string) (bool, error) {
+	if !strings.HasPrefix(path, p.prefix) {
+		return false, nil
+	}
+	return filepath.Match(p.pattern, filepath.Base(path))
+}
+
+func matchNested(path string, patterns []nestedPattern) (bool, error) {
+	for _, p := range patterns {
+		if m, err := p.Match(path); err != nil || m {
+			return m, err
+		}
+	}
+	return false, nil
+}
+
+func findNestedPatterns(excludes []string) (newExcludes []string, nested []nestedPattern) {
+	newExcludes = []string{}
+	nested = []nestedPattern{}
+	for _, e := range excludes {
+		i := strings.Index(e, "**/")
+		// keep the exclude as is
+		if i < 0 {
+			newExcludes = append(newExcludes, e)
+			continue
+		}
+		// make a nested pattern
+		nested = append(nested, nestedPattern{e[:i], e[i+3:]})
+	}
+	return newExcludes, nested
+}
diff --git a/src/rocker/build/copy_test.go b/src/rocker/build/copy_test.go
new file mode 100644
index 00000000..4a44132e
--- /dev/null
+++ b/src/rocker/build/copy_test.go
@@ -0,0 +1,867 @@
+/*-
+ * Copyright 2015 Grammarly, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package build + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "os/exec" + "rocker/test" + "strings" + "testing" + + "github.com/kr/pretty" + "github.com/stretchr/testify/assert" + + "github.com/docker/docker/pkg/tarsum" +) + +func TestCopy_ListFiles_Basic(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "file1.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "file1.txt", + } + excludes := []string{} + + matches, err := listFiles(tmpDir, includes, excludes) + if err != nil { + t.Fatal(err) + } + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("matches: %# v", pretty.Formatter(matches)) + + assertions := [][2]string{ + {tmpDir + "/file1.txt", "file1.txt"}, + } + + assert.Len(t, matches, len(assertions)) + for i, a := range assertions { + assert.Equal(t, a[0], matches[i].src, "bad match src at index %d", i) + assert.Equal(t, a[1], matches[i].dest, "bad match dest at index %d", i) + } +} + +func TestCopy_ListFiles_Wildcard(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "file1.txt": "hello", + "file2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "*.txt", + } + excludes := []string{} + + matches, err := listFiles(tmpDir, includes, excludes) + if err != nil { + t.Fatal(err) + } + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("matches: %# v", pretty.Formatter(matches)) + + assertions := [][2]string{ + {tmpDir + "/file1.txt", "file1.txt"}, + {tmpDir + "/file2.txt", "file2.txt"}, + } + + assert.Len(t, matches, len(assertions)) + for i, a := range assertions { + assert.Equal(t, a[0], matches[i].src, "bad match src at index %d", i) + assert.Equal(t, a[1], matches[i].dest, "bad match dest at index %d", i) + } +} + +func TestCopy_ListFiles_Dir_Simple(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "dir/foo.txt": "hello", + "dir/bar.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "dir", + } + excludes := []string{} + + matches, err := listFiles(tmpDir, includes, excludes) + if err != nil { + t.Fatal(err) + } + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("matches: %# v", pretty.Formatter(matches)) + + assertions := [][2]string{ + {tmpDir + "/dir/bar.txt", "dir/bar.txt"}, + {tmpDir + "/dir/foo.txt", "dir/foo.txt"}, + } + + assert.Len(t, matches, len(assertions)) + for i, a := range assertions { + assert.Equal(t, a[0], matches[i].src, "bad match src at index %d", i) + assert.Equal(t, a[1], matches[i].dest, "bad match dest at index %d", i) + } +} + +func TestCopy_ListFiles_Dir_AndFiles(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "test.txt": "hello", + "dir/foo.txt": "hello", + "dir/bar.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + ".", + } + excludes := []string{} + + matches, err := listFiles(tmpDir, includes, excludes) + if err != nil { + t.Fatal(err) + } + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("matches: %# v", pretty.Formatter(matches)) + + assertions := [][2]string{ + {tmpDir + "/dir/bar.txt", "dir/bar.txt"}, + {tmpDir + "/dir/foo.txt", "dir/foo.txt"}, + {tmpDir + "/test.txt", "test.txt"}, + } + + assert.Len(t, matches, len(assertions)) + for i, a := range assertions { + assert.Equal(t, a[0], matches[i].src, "bad match 
src at index %d", i) + assert.Equal(t, a[1], matches[i].dest, "bad match dest at index %d", i) + } +} + +func TestCopy_ListFiles_Dir_Multi(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "a/test.txt": "hello", + "b/1.txt": "hello", + "b/2.txt": "hello", + "c/foo.txt": "hello", + "c/x/1.txt": "hello", + "c/x/2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "a", + "b/2.txt", + "c", + } + excludes := []string{} + + matches, err := listFiles(tmpDir, includes, excludes) + if err != nil { + t.Fatal(err) + } + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("matches: %# v", pretty.Formatter(matches)) + + assertions := [][2]string{ + {tmpDir + "/a/test.txt", "a/test.txt"}, + {tmpDir + "/b/2.txt", "2.txt"}, + {tmpDir + "/c/foo.txt", "c/foo.txt"}, + {tmpDir + "/c/x/1.txt", "c/x/1.txt"}, + {tmpDir + "/c/x/2.txt", "c/x/2.txt"}, + } + + assert.Len(t, matches, len(assertions)) + for i, a := range assertions { + assert.Equal(t, a[0], matches[i].src, "bad match src at index %d", i) + assert.Equal(t, a[1], matches[i].dest, "bad match dest at index %d", i) + } +} + +func TestCopy_ListFiles_Excludes_Basic(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "test1.txt": "hello", + "test2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "*.txt", + } + excludes := []string{ + "test2.txt", + } + + matches, err := listFiles(tmpDir, includes, excludes) + if err != nil { + t.Fatal(err) + } + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("matches: %# v", pretty.Formatter(matches)) + + assertions := [][2]string{ + {tmpDir + "/test1.txt", "test1.txt"}, + } + + assert.Len(t, matches, len(assertions)) + for i, a := range assertions { + assert.Equal(t, a[0], matches[i].src, "bad match src at index %d", i) + assert.Equal(t, a[1], matches[i].dest, "bad match dest at index %d", i) + } +} + +func TestCopy_ListFiles_Excludes_Explicit(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "test1.txt": "hello", + "test2.txt": "hello", + "test3.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "test2.txt", + } + excludes := []string{ + "*.txt", + } + + matches, err := listFiles(tmpDir, includes, excludes) + if err != nil { + t.Fatal(err) + } + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("matches: %# v", pretty.Formatter(matches)) + + assertions := [][2]string{ + {tmpDir + "/test2.txt", "test2.txt"}, + } + + assert.Len(t, matches, len(assertions)) + for i, a := range assertions { + assert.Equal(t, a[0], matches[i].src, "bad match src at index %d", i) + assert.Equal(t, a[1], matches[i].dest, "bad match dest at index %d", i) + } +} + +func TestCopy_ListFiles_Excludes_Exception(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "test1.txt": "hello", + "test2.txt": "hello", + "test3.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "*", + } + excludes := []string{ + "*.txt", + "!test2.txt", + } + + matches, err := listFiles(tmpDir, includes, excludes) + if err != nil { + t.Fatal(err) + } + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("matches: %# v", pretty.Formatter(matches)) + + assertions := [][2]string{ + {tmpDir + "/test2.txt", "test2.txt"}, + } + + assert.Len(t, matches, 
len(assertions)) + for i, a := range assertions { + assert.Equal(t, a[0], matches[i].src, "bad match src at index %d", i) + assert.Equal(t, a[1], matches[i].dest, "bad match dest at index %d", i) + } +} + +func TestCopy_ListFiles_Excludes_Dir(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "a/test1.txt": "hello", + "b/test2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + ".", + } + excludes := []string{ + "b", + } + + matches, err := listFiles(tmpDir, includes, excludes) + if err != nil { + t.Fatal(err) + } + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("matches: %# v", pretty.Formatter(matches)) + + assertions := [][2]string{ + {tmpDir + "/a/test1.txt", "a/test1.txt"}, + } + + assert.Len(t, matches, len(assertions)) + for i, a := range assertions { + assert.Equal(t, a[0], matches[i].src, "bad match src at index %d", i) + assert.Equal(t, a[1], matches[i].dest, "bad match dest at index %d", i) + } +} + +func TestCopy_ListFiles_Excludes_FileInAnyDir(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "a/test1.txt": "hello", + "b/test2.txt": "hello", + "c/d/e/test2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + ".", + } + excludes := []string{ + "**/test2.txt", + } + + matches, err := listFiles(tmpDir, includes, excludes) + if err != nil { + t.Fatal(err) + } + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("matches: %# v", pretty.Formatter(matches)) + + assertions := [][2]string{ + {tmpDir + "/a/test1.txt", "a/test1.txt"}, + } + + assert.Len(t, matches, len(assertions)) + for i, a := range assertions { + assert.Equal(t, a[0], matches[i].src, "bad match src at index %d", i) + assert.Equal(t, a[1], matches[i].dest, "bad match dest at index %d", i) + } +} + +func TestCopy_MakeTarStream_Basic(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "a/test.txt": "hello", + "b/1.txt": "hello", + "b/2.txt": "hello", + "c/foo.txt": "hello", + "c/x/1.txt": "hello", + "c/x/2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "a", + "b/2.txt", + "c", + } + excludes := []string{} + dest := "/" + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "a/test.txt", + "2.txt", + "c/foo.txt", + "c/x/1.txt", + "c/x/2.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content") +} + +func TestCopy_MakeTarStream_FileRename(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "a/test.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "./a/test.txt", + } + excludes := []string{} + dest := "/src/x.txt" + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "src/x.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content") +} + +func TestCopy_MakeTarStream_OneFileToDir(t *testing.T) { + 
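+	// Copying a single file to a destination that ends with a slash should
+	// place the file under that directory, keeping its base name.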
tmpDir := makeTmpDir(t, map[string]string{ + "a/test.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "a/test.txt", + } + excludes := []string{} + dest := "/src/" + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "src/test.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content") +} + +func TestCopy_MakeTarStream_CurrentDir(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "a/test.txt": "hello", + "b/1.txt": "hello", + "b/2.txt": "hello", + "c/foo.txt": "hello", + "c/x/1.txt": "hello", + "c/x/2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + ".", + } + excludes := []string{} + dest := "/go/app/src" + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "go/app/src/a/test.txt", + "go/app/src/b/1.txt", + "go/app/src/b/2.txt", + "go/app/src/c/foo.txt", + "go/app/src/c/x/1.txt", + "go/app/src/c/x/2.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content") +} + +func TestCopy_MakeTarStream_DirRename(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "c/foo.txt": "hello", + "c/x/1.txt": "hello", + "c/x/2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + // ADD ./c /src --> /src + // ADD ./a/b[/1,2] /src -> /src[/1,2] + + includes := []string{ + "./c", + } + excludes := []string{} + dest := "/src" + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "src/foo.txt", + "src/x/1.txt", + "src/x/2.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content") +} + +func TestCopy_MakeTarStream_DirRenameLeadingSlash(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "c/foo.txt": "hello", + "c/x/1.txt": "hello", + "c/x/2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + // ADD ./c/ /src --> /src + + includes := []string{ + "./c/", + } + excludes := []string{} + dest := "/src" + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "src/foo.txt", + "src/x/1.txt", + "src/x/2.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content") +} + +func TestCopy_MakeTarStream_SingleFileToDir(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "foo.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + assertions := [][3]string{ + {"foo.txt", "foo", "foo"}, + {"foo.txt", "foo/", "foo/foo.txt"}, + } + + for _, a := range assertions { 
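+		// Each assertion is {source pattern, destination, expected path inside the tar}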
+ includes := []string{a[0]} + excludes := []string{} + dest := a[1] + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{a[2]}, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content for COPY %s %s", a[0], a[1]) + } +} + +// TODO: +// WORKDIR /app +// COPY lib lib/ +// should copy to /app/lib + +func TestCopy_MakeTarStream_DirRenameDestLeadingSlash(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "lib/foo.txt": "hello", + "lib/x/1.txt": "hello", + "lib/x/2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + // WORKDIR /app + // COPY lib lib + // COPY lib lib/ + // COPY lib/ lib + // COPY lib/ lib/ + // /app + // /app/lib + // /app/lib/foo.txt + // /app/lib/x/1.txt + // /app/lib/x/2.txt + + assertions := [][2]string{ + {"lib", "lib"}, + {"lib", "lib/"}, + {"lib/", "lib"}, + {"lib/", "lib/"}, + } + + for _, a := range assertions { + includes := []string{a[0]} + excludes := []string{} + dest := a[1] + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "lib/foo.txt", + "lib/x/1.txt", + "lib/x/2.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content for COPY %s %s", a[0], a[1]) + } +} + +func TestCopy_MakeTarStream_DirRenameWildcard(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "c/foo.txt": "hello", + "c/x/1.txt": "hello", + "c/x/2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + // ADD ./c /src --> /src + // ADD ./a/b[/1,2] /src -> /src[/1,2] + + includes := []string{ + "*", + } + excludes := []string{} + dest := "/src" + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "src/c/foo.txt", + "src/c/x/1.txt", + "src/c/x/2.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content") +} + +func TestCopy_MakeTarStream_SubDirRenameWildcard(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "a/c/foo.txt": "hello", + "a/c/x/1.txt": "hello", + "a/c/x/2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "a/*", + } + excludes := []string{} + dest := "/src" + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "src/c/foo.txt", + "src/c/x/1.txt", + "src/c/x/2.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content") +} + +func TestCopy_MakeTarStream_WierdWildcards(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "abc.txt": "hello", + "adf.txt": 
"hello", + "bvz.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "a*.txt", + } + excludes := []string{} + dest := "./" + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "./abc.txt", + "./adf.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content") +} + +func TestCopy_MakeTarStream_SingleFileDirRename(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "c/foo.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + // ADD ./c /src --> /src + // ADD ./a/b[/1,2] /src -> /src[/1,2] + + includes := []string{ + "./c", + } + excludes := []string{} + dest := "/src" + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "src/foo.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content") +} + +// helper functions + +func makeTmpDir(t *testing.T, files map[string]string) string { + tmpDir, err := ioutil.TempDir("", "rocker-copy-test") + if err != nil { + t.Fatal(err) + } + if err := test.MakeFiles(tmpDir, files); err != nil { + os.RemoveAll(tmpDir) + t.Fatal(err) + } + t.Logf("temp files: %# v", pretty.Formatter(files)) + return tmpDir +} + +func writeReadTar(t *testing.T, tmpDir string, tarStream io.ReadCloser) string { + data, err := ioutil.ReadAll(tarStream) + if err != nil { + t.Fatal(err) + } + defer tarStream.Close() + + tarSum, err := tarsum.NewTarSum(bytes.NewReader(data), true, tarsum.Version1) + if err != nil { + t.Fatal(err) + } + if _, err := io.Copy(ioutil.Discard, tarSum); err != nil { + t.Fatal(err) + } + t.Logf("tarsum: %s", tarSum.Sum(nil)) + + if err := ioutil.WriteFile(tmpDir+"/archive.tar", data, 0644); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir + "/archive.tar") + + cmd := exec.Command("tar", "-tf", tmpDir+"/archive.tar") + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + + return string(out) +} diff --git a/src/rocker/build/dockerignore.go b/src/rocker/build/dockerignore.go new file mode 100644 index 00000000..31a0aaa1 --- /dev/null +++ b/src/rocker/build/dockerignore.go @@ -0,0 +1,70 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package build
+
+import (
+	"bufio"
+	"io"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+)
+
+// TODO: maybe move some stuff from copy.go here
+
+var (
+	dockerignoreCommentRegexp = regexp.MustCompile("\\s*#.*")
+)
+
+// ReadDockerignoreFile reads and parses a .dockerignore file
+func ReadDockerignoreFile(file string) ([]string, error) {
+	fd, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	defer fd.Close()
+
+	return ReadDockerignore(fd)
+}
+
+// ReadDockerignore reads and parses .dockerignore content from an io.Reader
+func ReadDockerignore(r io.Reader) ([]string, error) {
+	var (
+		scanner = bufio.NewScanner(r)
+		result  = []string{}
+	)
+
+	for scanner.Scan() {
+		// Strip comments
+		line := scanner.Text()
+		line = dockerignoreCommentRegexp.ReplaceAllString(line, "")
+		// Eliminate leading and trailing whitespace.
+		pattern := strings.TrimSpace(line)
+		if pattern == "" {
+			continue
+		}
+		pattern = filepath.Clean(pattern)
+		result = append(result, pattern)
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
diff --git a/src/rocker/build/dockerignore_test.go b/src/rocker/build/dockerignore_test.go
new file mode 100644
index 00000000..082395e8
--- /dev/null
+++ b/src/rocker/build/dockerignore_test.go
@@ -0,0 +1,55 @@
+/*-
+ * Copyright 2015 Grammarly, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package build
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestDockerignore_Read(t *testing.T) {
+	content := `
+	# comment
+README.md
+**/*.o
+!result.o
+
+# Some comment
+ .idea
+.git
+
+a/b/../c # inline comment
+`
+
+	result, err := ReadDockerignore(strings.NewReader(content))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expected := []string{
+		"README.md",
+		"**/*.o",
+		"!result.o",
+		".idea",
+		".git",
+		"a/c",
+	}
+
+	assert.Equal(t, expected, result)
+}
diff --git a/src/rocker/build/imagedata.go b/src/rocker/build/imagedata.go
deleted file mode 100644
index ae27acef..00000000
--- a/src/rocker/build/imagedata.go
+++ /dev/null
@@ -1,75 +0,0 @@
-/*-
- * Copyright 2015 Grammarly, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package build - -import ( - "encoding/json" - "fmt" - "log" - "rocker/imagename" - "rocker/template" - "time" - - "github.com/fatih/color" -) - -// RockerImageData provides metadata for images built with Rocker -// It can be attached to a container label called "rocker-data" if -// --meta flag was given to `rocker build` -type RockerImageData struct { - ImageName *imagename.ImageName - Rockerfile string - Vars template.Vars - Properties template.Vars - Created time.Time -} - -// PrettyString returns RockerImageData as a printable string -func (data *RockerImageData) PrettyString() string { - prettyVars, err := json.MarshalIndent(data.Vars, "", " ") - if err != nil { - log.Fatal(err) - } - prettyProps, err := json.MarshalIndent(data.Properties, "", " ") - if err != nil { - log.Fatal(err) - } - green := color.New(color.FgGreen).SprintfFunc() - yellow := color.New(color.FgYellow).SprintfFunc() - sep := "=======================================================\n" - - res := fmt.Sprintf("%s%s\n", green(sep), - green("Image: %s", data.ImageName.String())) - - if !data.Created.IsZero() { - res = fmt.Sprintf("%sCreated: %s\n", res, data.Created.Format(time.RFC850)) - } - - if data.Properties != nil { - res = fmt.Sprintf("%sProperties: %s\n", res, prettyProps) - } - - if data.Vars != nil { - res = fmt.Sprintf("%sVars: %s\n", res, prettyVars) - } - - if data.Rockerfile != "" { - res = fmt.Sprintf("%s%s\n%s\n%s\n%s", res, yellow("Rockerfile:"), yellow(sep), data.Rockerfile, yellow(sep)) - } - - return res -} diff --git a/src/rocker/build/internals.go b/src/rocker/build/internals.go deleted file mode 100644 index 2a6f207e..00000000 --- a/src/rocker/build/internals.go +++ /dev/null @@ -1,477 +0,0 @@ -/*- - * Copyright 2015 Grammarly, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package build - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "regexp" - "strings" - "time" - - "rocker/imagename" - "rocker/parser" - - "github.com/docker/docker/pkg/jsonmessage" - "github.com/fsouza/go-dockerclient" -) - -var ( - captureImageID = regexp.MustCompile("Successfully built ([a-f0-9]{12})") - captureDigest = regexp.MustCompile("digest:\\s*(sha256:[a-f0-9]{64})") -) - -func (builder *Builder) checkDockerignore() (err error) { - ignoreLines := []string{ - ".dockerignore", - builder.getTmpPrefix() + "*", - builder.rockerfileRelativePath(), - } - dockerignoreFile := path.Join(builder.ContextDir, ".dockerignore") - - // everything is easy, we just need to create one - if _, err := os.Stat(dockerignoreFile); os.IsNotExist(err) { - fmt.Fprintf(builder.OutStream, "[Rocker] Create .dockerignore in context directory\n") - newLines := append([]string{ - "# This file is automatically generated by Rocker, please keep it", - }, ignoreLines...) 
- return ioutil.WriteFile(dockerignoreFile, []byte(strings.Join(newLines, "\n")+"\n"), 0644) - } - - // more difficult, find missing lines - file, err := os.Open(dockerignoreFile) - if err != nil { - return err - } - defer file.Close() - - // read current .dockerignore and filter those ignoreLines which are already there - scanner := bufio.NewScanner(file) - newLines := []string{} - for scanner.Scan() { - currentLine := scanner.Text() - newLines = append(newLines, currentLine) - if currentLine == ".git" { - builder.gitIgnored = true - } - for i, ignoreLine := range ignoreLines { - if ignoreLine == currentLine { - ignoreLines = append(ignoreLines[:i], ignoreLines[i+1:]...) - break - } - } - } - - if err := scanner.Err(); err != nil { - return err - } - - // if we have still something to add - do it - if len(ignoreLines) > 0 { - newLines = append(newLines, ignoreLines...) - fmt.Fprintf(builder.OutStream, "[Rocker] Add %d lines to .dockerignore\n", len(ignoreLines)) - return ioutil.WriteFile(dockerignoreFile, []byte(strings.Join(newLines, "\n")+"\n"), 0644) - } - - return nil -} - -func (builder *Builder) runDockerfile() (err error) { - if len(builder.dockerfile.Children) == 0 { - return nil - } - - // HACK: skip if all we have is "FROM scratch", we need to do something - // to produce actual layer with ID, so create dummy LABEL layer - // maybe there is a better solution, but keep this for a while - if len(builder.dockerfile.Children) == 1 && - builder.dockerfile.Children[0].Value == "from" && - builder.dockerfile.Children[0].Next.Value == "scratch" { - - builder.dockerfile.Children = append(builder.dockerfile.Children, &parser.Node{ - Value: "label", - Next: &parser.Node{ - Value: "ROCKER_SCRATCH=1", - }, - }) - } - - pull := builder.Pull - - // missing from, use latest image sha - if builder.dockerfile.Children[0].Value != "from" { - if builder.imageID == "" { - return fmt.Errorf("Missing initial FROM instruction") - } - fromNode := &parser.Node{ - Value: "from", - Next: &parser.Node{ - Value: builder.imageID, - }, - } - pull = false - builder.dockerfile.Children = append([]*parser.Node{fromNode}, builder.dockerfile.Children...) 
- } - - // Write Dockerfile to a context - dockerfileName := builder.dockerfileName() - dockerfilePath := path.Join(builder.ContextDir, dockerfileName) - - dockerfileContent, err := RockerfileAstToString(builder.dockerfile) - if err != nil { - return err - } - - if err := ioutil.WriteFile(dockerfilePath, []byte(dockerfileContent), 0644); err != nil { - return err - } - defer os.Remove(dockerfilePath) - - // TODO: here we can make a hint to a user, if the context directory is very large, - // suggest to add some stuff to .dockerignore, etc - - pipeReader, pipeWriter := io.Pipe() - - var buf bytes.Buffer - outStream := io.MultiWriter(pipeWriter, &buf) - - // TODO: consider ForceRmTmpContainer: true - opts := docker.BuildImageOptions{ - Dockerfile: dockerfileName, - OutputStream: outStream, - ContextDir: builder.ContextDir, - NoCache: !builder.UtilizeCache, - Auth: *builder.Auth, - Pull: pull, - RawJSONStream: true, - } - - errch := make(chan error) - - go func() { - err := builder.Docker.BuildImage(opts) - - if err := pipeWriter.Close(); err != nil { - fmt.Fprintf(builder.OutStream, "pipeWriter.Close() err: %s\n", err) - } - - errch <- err - }() - - if err := jsonmessage.DisplayJSONMessagesStream(pipeReader, builder.OutStream, builder.fdOut, builder.isTerminalOut); err != nil { - return fmt.Errorf("Failed to process json stream error: %s", err) - } - - if err := <-errch; err != nil { - return fmt.Errorf("Failed to build image: %s", err) - } - - // It is the best way to have built image id so far - // The other option would be to tag the image, and then remove the tag - // http://stackoverflow.com/questions/19776308/get-image-id-from-image-created-via-remote-api - matches := captureImageID.FindStringSubmatch(buf.String()) - if len(matches) == 0 { - return fmt.Errorf("Couldn't find image id out of docker build output") - } - imageID := matches[1] - - // Retrieve image id - image, err := builder.Docker.InspectImage(imageID) - if err != nil { - // fix go-dockerclient non descriptive error - if err.Error() == "no such image" { - err = fmt.Errorf("No such image: %s", imageID) - } - return err - } - - builder.imageID = image.ID - builder.Config = image.Config - - // clean it up - builder.dockerfile = &parser.Node{} - - return nil -} - -func (builder *Builder) addLabels(labels map[string]string) { - if builder.Config.Labels == nil { - builder.Config.Labels = map[string]string{} - } - for k, v := range labels { - builder.Config.Labels[k] = v - } -} - -func (builder *Builder) temporaryCmd(cmd []string) func() { - origCmd := builder.Config.Cmd - builder.Config.Cmd = cmd - return func() { - builder.Config.Cmd = origCmd - } -} - -func (builder *Builder) temporaryConfig(fn func()) func() { - // actually copy the whole config - origConfig := *builder.Config - fn() - return func() { - builder.Config = &origConfig - } -} - -func (builder *Builder) probeCache() (bool, error) { - if !builder.UtilizeCache || builder.cacheBusted { - return false, nil - } - - cache, err := builder.imageGetCached(builder.imageID, builder.Config) - if err != nil { - return false, err - } - if cache == nil { - builder.cacheBusted = true - return false, nil - } - - fmt.Fprintf(builder.OutStream, "[Rocker] ---> Using cache\n") - - builder.imageID = cache.ID - return true, nil -} - -func (builder *Builder) imageGetCached(imageID string, config *docker.Config) (*docker.Image, error) { - // Retrieve all images and cache, because it might be a heavy operation - if builder.imagesCache == nil { - var err error - if builder.imagesCache, err 
= builder.Docker.ListImages(docker.ListImagesOptions{All: true}); err != nil { - return nil, err - } - } - - var siblings []string - for _, img := range builder.imagesCache { - if img.ParentID != imageID { - continue - } - siblings = append(siblings, img.ID) - } - - // Loop on the children of the given image and check the config - var match *docker.Image - - if len(siblings) == 0 { - return match, nil - } - - // TODO: ensure goroutines die if return abnormally - - ch := make(chan *docker.Image) - errch := make(chan error) - numResponses := 0 - - for _, siblingID := range siblings { - go func(siblingID string) { - image, err := builder.Docker.InspectImage(siblingID) - if err != nil { - errch <- err - return - } - ch <- image - }(siblingID) - } - - for { - select { - case image := <-ch: - if CompareConfigs(&image.ContainerConfig, config) { - if match == nil || match.Created.Before(image.Created) { - match = image - } - } - - numResponses++ - - if len(siblings) == numResponses { - return match, nil - } - - case err := <-errch: - return nil, err - - case <-time.After(10 * time.Second): - // TODO: return "cache didn't hit"? - return nil, fmt.Errorf("Timeout while fetching cached images") - } - } -} - -func (builder *Builder) ensureImage(imageName string, purpose string) error { - _, err := builder.Docker.InspectImage(imageName) - if err != nil && err.Error() == "no such image" { - fmt.Fprintf(builder.OutStream, "[Rocker] Pulling image: %s for %s\n", imageName, purpose) - - image := imagename.NewFromString(imageName) - - pipeReader, pipeWriter := io.Pipe() - - pullOpts := docker.PullImageOptions{ - Repository: image.NameWithRegistry(), - Registry: image.Registry, - Tag: image.GetTag(), - OutputStream: pipeWriter, - RawJSONStream: true, - } - - errch := make(chan error) - - go func() { - err := builder.Docker.PullImage(pullOpts, *builder.Auth) - - if err := pipeWriter.Close(); err != nil { - fmt.Fprintf(builder.OutStream, "pipeWriter.Close() err: %s\n", err) - } - - errch <- err - }() - - if err := jsonmessage.DisplayJSONMessagesStream(pipeReader, builder.OutStream, builder.fdOut, builder.isTerminalOut); err != nil { - return fmt.Errorf("Failed to process json stream for image: %s, error: %s", image, err) - } - - if err := <-errch; err != nil { - return fmt.Errorf("Failed to pull image: %s, error: %s", image, err) - } - } else if err != nil { - return err - } - return nil -} - -func (builder *Builder) pushImage(image imagename.ImageName) (digest string, err error) { - - var ( - pipeReader, pipeWriter = io.Pipe() - errch = make(chan error) - - buf bytes.Buffer - outStream = io.MultiWriter(pipeWriter, &buf) - ) - - go func() { - err := builder.Docker.PushImage(docker.PushImageOptions{ - Name: image.NameWithRegistry(), - Tag: image.GetTag(), - Registry: image.Registry, - OutputStream: outStream, - RawJSONStream: true, - }, *builder.Auth) - - if err := pipeWriter.Close(); err != nil { - fmt.Fprintf(builder.OutStream, "pipeWriter.Close() err: %s\n", err) - } - - errch <- err - }() - - if err := jsonmessage.DisplayJSONMessagesStream(pipeReader, builder.OutStream, builder.fdOut, builder.isTerminalOut); err != nil { - return "", fmt.Errorf("Failed to process json stream for image: %s, error: %s", image, err) - } - - if err := <-errch; err != nil { - return "", fmt.Errorf("Failed to push image: %s, error: %s", image, err) - } - - // It is the best way to have pushed image digest so far - matches := captureDigest.FindStringSubmatch(buf.String()) - if len(matches) > 0 { - digest = matches[1] - } - - return 
digest, nil -} - -func (builder *Builder) makeExportsContainer() (string, error) { - if builder.exportsContainerID != "" { - return builder.exportsContainerID, nil - } - exportsContainerName := builder.exportsContainerName() - - containerConfig := &docker.Config{ - Image: rsyncImage, - Volumes: map[string]struct{}{ - "/opt/rsync/bin": struct{}{}, - exportsVolume: struct{}{}, - }, - Labels: map[string]string{ - "Rockerfile": builder.Rockerfile, - "ImageId": builder.imageID, - }, - } - - container, err := builder.ensureContainer(exportsContainerName, containerConfig, "exports") - if err != nil { - return "", err - } - - builder.exportsContainerID = container.ID - - return container.ID, nil -} - -func (builder *Builder) getMountContainerIds() []string { - containerIds := make(map[string]struct{}) - for _, mount := range builder.mounts { - if mount.containerID != "" { - containerIds[mount.containerID] = struct{}{} - } - } - result := []string{} - for containerID := range containerIds { - result = append(result, containerID) - } - return result -} - -func (builder *Builder) getAllMountContainerIds() []string { - containerIds := make(map[string]struct{}) - for _, mount := range builder.allMounts { - if mount.containerID != "" { - containerIds[mount.containerID] = struct{}{} - } - } - result := []string{} - for containerID := range containerIds { - result = append(result, containerID) - } - return result -} - -func (builder *Builder) getBinds() []string { - var result []string - for _, mount := range builder.mounts { - if mount.containerID == "" { - result = append(result, mount.src+":"+mount.dest) - } - } - return result -} diff --git a/src/rocker/build/plan.go b/src/rocker/build/plan.go new file mode 100644 index 00000000..b2a209d7 --- /dev/null +++ b/src/rocker/build/plan.go @@ -0,0 +1,95 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package build
+
+import "strings"
+
+// Plan is the list of commands to be executed sequentially by a build process
+type Plan []Command
+
+// NewPlan makes a new plan out of the list of commands from a Rockerfile
+func NewPlan(commands []ConfigCommand, finalCleanup bool) (plan Plan, err error) {
+	plan = Plan{}
+
+	committed := true
+
+	commit := func() {
+		plan = append(plan, &CommandCommit{})
+		committed = true
+	}
+
+	cleanup := func(i int) {
+		plan = append(plan, &CommandCleanup{
+			final:  i == len(commands)-1,
+			tagged: strings.Contains("tag push from", commands[i].name),
+		})
+	}
+
+	alwaysCommitBefore := "run attach add copy tag push export import"
+	alwaysCommitAfter := "run attach add copy export import"
+	neverCommitAfter := "from maintainer tag push"
+
+	for i := 0; i < len(commands); i++ {
+		cfg := commands[i]
+
+		cmd, err := NewCommand(cfg)
+		if err != nil {
+			return nil, err
+		}
+
+		// We want to reset the collected state between FROM instructions,
+		// but only if it's not the first FROM
+		if cfg.name == "from" {
+			if !committed {
+				commit()
+			}
+			if i > 0 {
+				cleanup(i - 1)
+			}
+		}
+
+		// Commit before commands that require state
+		if strings.Contains(alwaysCommitBefore, cfg.name) && !committed {
+			commit()
+		}
+
+		plan = append(plan, cmd)
+
+		// Some commands need an immediate commit
+		if strings.Contains(alwaysCommitAfter, cfg.name) {
+			commit()
+		} else if !strings.Contains(neverCommitAfter, cfg.name) {
+			// Reset the committed state for the rest of the commands and
+			// start collecting them
+			committed = false
+
+			// If we reached the end of the Rockerfile, do the final commit.
+			// Note that the final commit will not happen if the last
+			// command was TAG, PUSH or FROM
+			if i == len(commands)-1 {
+				commit()
+			}
+		}
+
+		// Always clean up at the end
+		if i == len(commands)-1 && finalCleanup {
+			cleanup(i)
+		}
+	}
+
+	return plan, err
+}
diff --git a/src/rocker/build/plan_test.go b/src/rocker/build/plan_test.go
new file mode 100644
index 00000000..f008ff0d
--- /dev/null
+++ b/src/rocker/build/plan_test.go
@@ -0,0 +1,344 @@
+/*-
+ * Copyright 2015 Grammarly, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package build + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPlan_Basic(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +`) + + expected := []Command{ + &CommandFrom{}, + &CommandCleanup{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_Run(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +RUN apt-get update +`) + + expected := []Command{ + &CommandFrom{}, + &CommandRun{}, + &CommandCommit{}, + &CommandCleanup{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_EnvRun(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +ENV name=web +ENV version=1.2 +RUN apt-get update +`) + + expected := []Command{ + &CommandFrom{}, + &CommandEnv{}, + &CommandEnv{}, + &CommandCommit{}, + &CommandRun{}, + &CommandCommit{}, + &CommandCleanup{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_EnvLast(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +ENV name=web +`) + + expected := []Command{ + &CommandFrom{}, + &CommandEnv{}, + &CommandCommit{}, + &CommandCleanup{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_TwoFroms(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +FROM alpine +`) + + expected := []Command{ + &CommandFrom{}, + &CommandCleanup{}, + &CommandFrom{}, + &CommandCleanup{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_TwoFromsEnvBetween(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +ENV name=web +FROM alpine +`) + + expected := []Command{ + &CommandFrom{}, + &CommandEnv{}, + &CommandCommit{}, + &CommandCleanup{}, + &CommandFrom{}, + &CommandCleanup{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_TwoFromsTwoEnvs(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +ENV mode=build +FROM alpine +ENV mode=run +`) + + expected := []Command{ + &CommandFrom{}, + &CommandEnv{}, + &CommandCommit{}, + &CommandCleanup{}, + &CommandFrom{}, + &CommandEnv{}, + &CommandCommit{}, + &CommandCleanup{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_TagAtTheEnd(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +TAG my-build +`) + + expected := []Command{ + &CommandFrom{}, + &CommandTag{}, + &CommandCleanup{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_EnvBeforeTag(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +ENV type=web +TAG my-build +`) + + expected := []Command{ + &CommandFrom{}, + &CommandEnv{}, + &CommandCommit{}, + &CommandTag{}, + &CommandCleanup{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_TagInTheMiddle(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +TAG my-build +ENV type=web +`) + + expected := []Command{ + &CommandFrom{}, + &CommandTag{}, + &CommandEnv{}, + &CommandCommit{}, + &CommandCleanup{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_TagBeforeFrom(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +TAG my-build +FROM alpine +`) + + expected := []Command{ + &CommandFrom{}, + 
&CommandTag{},
+		&CommandCleanup{},
+		&CommandFrom{},
+		&CommandCleanup{},
+	}
+
+	assert.Len(t, p, len(expected))
+	for i, c := range expected {
+		assert.IsType(t, c, p[i])
+	}
+}
+
+func TestPlan_RunBeforeTag(t *testing.T) {
+	p := makePlan(t, `
+FROM ubuntu
+RUN apt-get update
+TAG my-build
+`)
+
+	expected := []Command{
+		&CommandFrom{},
+		&CommandRun{},
+		&CommandCommit{},
+		&CommandTag{},
+		&CommandCleanup{},
+	}
+
+	assert.Len(t, p, len(expected))
+	for i, c := range expected {
+		assert.IsType(t, c, p[i])
+	}
+}
+
+func TestPlan_Scratch(t *testing.T) {
+	p := makePlan(t, `
+FROM scratch
+COPY rootfs /
+`)
+
+	expected := []Command{
+		&CommandFrom{},
+		&CommandCopy{},
+		&CommandCommit{},
+		&CommandCleanup{},
+	}
+
+	assert.Len(t, p, len(expected))
+	for i, c := range expected {
+		assert.IsType(t, c, p[i])
+	}
+}
+
+func TestPlan_CleanupTaggedFinal(t *testing.T) {
+	p := makePlan(t, `
+FROM ubuntu
+TAG dev
+`)
+
+	// from, tag, cleanup
+	c := p[2]
+
+	assert.IsType(t, &CommandCleanup{}, c)
+	assert.True(t, c.(*CommandCleanup).tagged)
+	assert.True(t, c.(*CommandCleanup).final)
+}
+
+func TestPlan_CleanupNotTaggedFinal(t *testing.T) {
+	p := makePlan(t, `
+FROM ubuntu
+ENV foo=bar
+`)
+
+	// from, env, commit, cleanup
+	c := p[3]
+
+	assert.IsType(t, &CommandCleanup{}, c)
+	assert.False(t, c.(*CommandCleanup).tagged)
+	assert.True(t, c.(*CommandCleanup).final)
+}
+
+func TestPlan_CleanupNotTaggedMiddleFrom(t *testing.T) {
+	p := makePlan(t, `
+FROM ubuntu
+ENV foo=bar
+FROM alpine
+`)
+
+	// from, env, commit, cleanup, from, cleanup
+	c := p[3]
+
+	assert.IsType(t, &CommandCleanup{}, c)
+	assert.False(t, c.(*CommandCleanup).tagged)
+	assert.False(t, c.(*CommandCleanup).final)
+}
+
+// internal helpers
+
+func makePlan(t *testing.T, rockerfileContent string) Plan {
+	b, _ := makeBuild(t, rockerfileContent, Config{})
+
+	p, err := NewPlan(b.rockerfile.Commands(), true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	return p
+}
diff --git a/src/rocker/build/rockerfile.go b/src/rocker/build/rockerfile.go
index b7fcd39b..35be210d 100644
--- a/src/rocker/build/rockerfile.go
+++ b/src/rocker/build/rockerfile.go
@@ -18,71 +18,80 @@ package build

 import (
 	"bytes"
-	"encoding/json"
+	"fmt"
 	"io"
-	"strings"
-
+	"io/ioutil"
+	"os"
 	"rocker/parser"
+	"rocker/template"
+	"strings"
 )

-// Parse parses a Rockerfile from an io.Reader and returns AST data structure
-func Parse(rockerfileContent io.Reader) (*parser.Node, error) {
-	node, err := parser.Parse(rockerfileContent)
+// Rockerfile represents the data structure of a Rockerfile
+type Rockerfile struct {
+	Name    string
+	Source  string
+	Content string
+	Vars    template.Vars
+	Funs    template.Funs
+
+	rootNode *parser.Node
+}
+
+// NewRockerfileFromFile reads and parses Rockerfile from a file
+func NewRockerfileFromFile(name string, vars template.Vars, funs template.Funs) (r *Rockerfile, err error) {
+	fd, err := os.Open(name)
 	if err != nil {
 		return nil, err
 	}
+	defer fd.Close()

-	return node, nil
+	return NewRockerfile(name, fd, vars, funs)
 }

-// RockerfileAstToString returns printable AST of the node
-func RockerfileAstToString(node *parser.Node) (str string, err error) {
-	str += node.Value
+
+// NewRockerfile reads and parses Rockerfile from an io.Reader
+func NewRockerfile(name string, in io.Reader, vars template.Vars, funs template.Funs) (r *Rockerfile, err error) {
+	r = &Rockerfile{
+		Name: name,
+		Vars: vars,
+		Funs: funs,
+	}

-	isKeyVal := node.Value == "env" || node.Value == "label"
+	var (
+		source  []byte
+		content *bytes.Buffer
+	)

-	if len(node.Flags) > 
0 { - str += " " + strings.Join(node.Flags, " ") + if source, err = ioutil.ReadAll(in); err != nil { + return nil, fmt.Errorf("Failed to read Rockerfile %s, error: %s", name, err) } - if node.Attributes["json"] { - args := []string{} - for n := node.Next; n != nil; n = n.Next { - args = append(args, n.Value) - } - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(args); err != nil { - return str, err - } - str += " " + strings.TrimSpace(buf.String()) - return str, nil + r.Source = string(source) + + if content, err = template.Process(name, bytes.NewReader(source), vars, funs); err != nil { + return nil, err } - for _, n := range node.Children { - children, err := RockerfileAstToString(n) - if err != nil { - return str, err - } - str += children + "\n" + r.Content = content.String() + + // TODO: update parser from Docker + + if r.rootNode, err = parser.Parse(content); err != nil { + return nil, err } - if node.Next != nil { - for n, i := node.Next, 0; n != nil; n, i = n.Next, i+1 { - if len(n.Children) > 0 { - children, err := RockerfileAstToString(n) - if err != nil { - return str, err - } - str += " " + children - } else if isKeyVal && i%2 != 0 { - str += "=" + n.Value - } else { - str += " " + n.Value - } - } + return r, nil +} + +// Commands returns the list of command configurations from the Rockerfile +func (r *Rockerfile) Commands() []ConfigCommand { + commands := []ConfigCommand{} + + for i := 0; i < len(r.rootNode.Children); i++ { + commands = append(commands, parseCommand(r.rootNode.Children[i], false)) } - return strings.TrimSpace(str), nil + return commands } func handleJSONArgs(args []string, attributes map[string]bool) []string { @@ -98,6 +107,49 @@ func handleJSONArgs(args []string, attributes map[string]bool) []string { return []string{strings.Join(args, " ")} } +func parseCommand(node *parser.Node, isOnbuild bool) ConfigCommand { + cfg := ConfigCommand{ + name: node.Value, + attrs: node.Attributes, + original: node.Original, + args: []string{}, + flags: parseFlags(node.Flags), + isOnbuild: isOnbuild, + } + + // fill in args and substitute vars + for n := node.Next; n != nil; n = n.Next { + cfg.args = append(cfg.args, n.Value) + } + + return cfg +} + +func parseOnbuildCommands(onBuildTriggers []string) ([]ConfigCommand, error) { + commands := []ConfigCommand{} + + for _, step := range onBuildTriggers { + + ast, err := parser.Parse(strings.NewReader(step)) + if err != nil { + return commands, err + } + + for _, n := range ast.Children { + switch strings.ToUpper(n.Value) { + case "ONBUILD": + return commands, fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return commands, fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value) + } + + commands = append(commands, parseCommand(n, true)) + } + } + + return commands, nil +} + func parseFlags(flags []string) map[string]string { result := make(map[string]string) for _, flag := range flags { diff --git a/src/rocker/build/rockerfile_test.go b/src/rocker/build/rockerfile_test.go index 7778f875..517cf147 100644 --- a/src/rocker/build/rockerfile_test.go +++ b/src/rocker/build/rockerfile_test.go @@ -17,96 +17,61 @@ package build import ( - "io/ioutil" - "os" + "rocker/template" "strings" "testing" "github.com/stretchr/testify/assert" ) -func TestConfigParse(t *testing.T) { - t.Parallel() - - fd, err := os.Open("testdata/Rockerfile") - if err != nil { - t.Fatal(err) - } - - node, err := Parse(fd) - if err != nil { - t.Fatal(err) - } - - t.Logf("Node: %v", node.Dump()) - - 
expected, err := ioutil.ReadFile("testdata/Rockerfile_result") +func TestNewRockerfile_Base(t *testing.T) { + src := `FROM {{ .BaseImage }}` + vars := template.Vars{"BaseImage": "ubuntu"} + r, err := NewRockerfile("test", strings.NewReader(src), vars, template.Funs{}) if err != nil { t.Fatal(err) } - assert.Equal(t, string(expected), node.Dump()+"\n", "invalid AST parsed from Rockerfile") + assert.Equal(t, src, r.Source) + assert.Equal(t, "FROM ubuntu", r.Content) } -func TestConfigRockerfileAstToString_Base(t *testing.T) { - t.Parallel() - - fd, err := os.Open("testdata/Rockerfile") - if err != nil { - t.Fatal(err) - } - - node, err := Parse(fd) +func TestNewRockerfileFromFile(t *testing.T) { + r, err := NewRockerfileFromFile("testdata/Rockerfile", template.Vars{}, template.Funs{}) if err != nil { t.Fatal(err) } - str, err := RockerfileAstToString(node) - if err != nil { - t.Fatal(err) - } - t.Logf("Node String: %v", str) - - expected, err := ioutil.ReadFile("testdata/Rockerfile_string_result") - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, string(expected), str+"\n", "invalid Rockerfile dumped to string") + assert.Equal(t, `from "some-java8-image-dev:1"`, r.rootNode.Children[0].Dump()) } -func TestConfigRockerfileAstToString_CmdJson(t *testing.T) { - t.Parallel() - - node, err := Parse(strings.NewReader("FROM scratch\nCMD [\"-\"]\n")) +func TestRockerfileCommands(t *testing.T) { + src := `FROM ubuntu` + r, err := NewRockerfile("test", strings.NewReader(src), template.Vars{}, template.Funs{}) if err != nil { t.Fatal(err) } - str, err := RockerfileAstToString(node) - if err != nil { - t.Fatal(err) - } - t.Logf("Node String: %v", str) - - assert.Equal(t, "from scratch\ncmd [\"-\"]", str, "invalid Rockerfile dumped to string") + commands := r.Commands() + assert.Len(t, commands, 1) + assert.Equal(t, "from", commands[0].name) + assert.Equal(t, "ubuntu", commands[0].args[0]) } -func TestConfigRockerfileAstToString_KeyVals(t *testing.T) { - t.Parallel() - - node, err := Parse(strings.NewReader("FROM scratch\nENV NAME=JOHN\\\n LASTNAME=DOE\nMOUNT a b c\nLABEL ASD QWE SDF")) - if err != nil { - t.Fatal(err) +func TestRockerfileParseOnbuildCommands(t *testing.T) { + triggers := []string{ + "RUN make", + "RUN make install", } - str, err := RockerfileAstToString(node) + commands, err := parseOnbuildCommands(triggers) if err != nil { t.Fatal(err) } - // t.Logf("Node String: %v", str) - // pretty.Println(node) - // t.Logf("Node: %v", node.Dump()) - assert.Equal(t, "from scratch\nenv NAME=JOHN LASTNAME=DOE\nmount a b c\nlabel ASD=QWE SDF", str, "invalid Rockerfile dumped to string") + assert.Len(t, commands, 2) + assert.Equal(t, "run", commands[0].name) + assert.Equal(t, []string{"make"}, commands[0].args) + assert.Equal(t, "run", commands[1].name) + assert.Equal(t, []string{"make install"}, commands[1].args) } diff --git a/src/rocker/build/semver.go b/src/rocker/build/semver.go deleted file mode 100644 index 18427b87..00000000 --- a/src/rocker/build/semver.go +++ /dev/null @@ -1,64 +0,0 @@ -/*- - * Copyright 2015 Grammarly, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Light semver implementation, we cannot use 'semver' package because -// it does not export 'version' property that we need here. - -package build - -import ( - "fmt" - "regexp" - "strconv" -) - -var semverRegexp = regexp.MustCompile(`^\bv?(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(?:-([\da-z\-]+(?:\.[\da-z\-]+)*))?\b$`) - -// Semver represents a light version of 'semver' data structure -type Semver struct { - Major int - Minor int - Patch int - Suffix string -} - -// NewSemver parses a semver string into the Semver struct -func NewSemver(str string) (semver *Semver, err error) { - matches := semverRegexp.FindAllStringSubmatch(str, -1) - if matches == nil { - return nil, fmt.Errorf("Failed to parse given version as semver: %s", str) - } - - semver = &Semver{} - - if semver.Major, err = strconv.Atoi(matches[0][1]); err != nil { - return nil, err - } - if semver.Minor, err = strconv.Atoi(matches[0][2]); err != nil { - return nil, err - } - if semver.Patch, err = strconv.Atoi(matches[0][3]); err != nil { - return nil, err - } - semver.Suffix = matches[0][4] - - return semver, nil -} - -// HasSuffix returns true if the suffix (such as `-build123`) is present for the version -func (semver *Semver) HasSuffix() bool { - return semver.Suffix != "" -} diff --git a/src/rocker/build/state.go b/src/rocker/build/state.go new file mode 100644 index 00000000..3fcf1085 --- /dev/null +++ b/src/rocker/build/state.go @@ -0,0 +1,81 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package build + +import ( + "fmt" + "sort" + "strings" + + "github.com/fsouza/go-dockerclient" +) + +// State is the build state +// TODO: document +type State struct { + Config docker.Config + ImageID string + ParentID string + ExportsID string + NoBaseImage bool + ProducedImage bool + InjectCommands []string + Commits []string + + NoCache StateNoCache +} + +// StateNoCache is a struct that cannot be overridden by a cached item +type StateNoCache struct { + Dockerignore []string + CacheBusted bool + CmdSet bool + ContainerID string + HostConfig docker.HostConfig +} + +// NewState makes a fresh state +func NewState(b *Build) State { + s := State{} + s.NoCache.Dockerignore = b.cfg.Dockerignore + return s +} + +// Commit adds a commit to the current state +func (s *State) Commit(msg string, args ...interface{}) *State { + s.Commits = append(s.Commits, fmt.Sprintf(msg, args...)) + sort.Strings(s.Commits) + return s +} + +// CleanCommits resets the commits struct +func (s *State) CleanCommits() *State { + s.Commits = []string{} + return s +} + +// GetCommits returns merged commits string +func (s State) GetCommits() string { + return strings.Join(s.Commits, "; ") +} + +// Equals returns true if the two states are equal +// NOTE: we identify unique commands by commits, so state uniqueness is simply a commit +func (s State) Equals(s2 State) bool { + // TODO: compare other properties? + return s.GetCommits() == s2.GetCommits() +} diff --git a/src/rocker/build/tar.go b/src/rocker/build/tar.go new file mode 100644 index 00000000..831340d6 --- /dev/null +++ b/src/rocker/build/tar.go @@ -0,0 +1,118 @@ +// NOTICE: +// it was originally grabbed from the docker source and +// adopted for use by rocker; see LICENSE in the current +// directory from the license and the copyright. +// +// Copyright 2013-2015 Docker, Inc. + +package build + +import ( + "archive/tar" + "bufio" + "fmt" + "io" + "os" + "strings" + + "github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/system" +) + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string +} + +// canonicalTarName provides a platform-independent and consistent posix-style +//path for files and directories to be archived regardless of the platform. 
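+//
+// A hedged example of the transform (paths assumed for illustration): a
+// directory entry "docs" becomes "docs/", and on Windows a relative path
+// like "docs\readme.md" is rewritten to "docs/readme.md" by the
+// platform-specific CanonicalTarNameForPath (tar_unix.go / tar_windows.go).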
+func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} + +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + link := "" + if fi.Mode()&os.ModeSymlink != 0 { + if link, err = os.Readlink(path); err != nil { + return err + } + } + + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return err + } + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + + nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) + if err != nil { + return err + } + + // if it's a regular file and has more than 1 link, + // it's hardlinked, so set the type flag accordingly + if fi.Mode().IsRegular() && nlink > 1 { + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg { + file, err := os.Open(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} diff --git a/src/rocker/build/tar_unix.go b/src/rocker/build/tar_unix.go new file mode 100644 index 00000000..d0d55e66 --- /dev/null +++ b/src/rocker/build/tar_unix.go @@ -0,0 +1,57 @@ +// +build !windows + +// NOTICE: +// it was originally grabbed from the docker source and +// adopted for use by rocker; see LICENSE in the current +// directory from the license and the copyright. +// +// Copyright 2013-2015 Docker, Inc. + +package build + +import ( + "archive/tar" + "errors" + "os" + "syscall" +) + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. 
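+//
+// On unix the input is already a slash-separated relative path, so this is
+// effectively the identity: e.g. "docs/readme.md" is returned unchanged
+// (example path assumed for illustration).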
+func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + err = errors.New("cannot convert stat value to syscall.Stat_t") + return + } + + nlink = uint32(s.Nlink) + inode = uint64(s.Ino) + + // Currently go does not fil in the major/minors + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) + hdr.Devminor = int64(minor(uint64(s.Rdev))) + } + + return +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} diff --git a/src/rocker/build/tar_windows.go b/src/rocker/build/tar_windows.go new file mode 100644 index 00000000..872ec798 --- /dev/null +++ b/src/rocker/build/tar_windows.go @@ -0,0 +1,56 @@ +// +build windows + +// NOTICE: +// it was originally grabbed from the docker source and +// adopted for use by rocker; see LICENSE in the current +// directory from the license and the copyright. +// +// Copyright 2013-2015 Docker, Inc. + +package build + +import ( + "archive/tar" + "fmt" + "os" + "strings" +) + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("Windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + perm &= 0755 + // Add the x bit: make everything +x from windows + perm |= 0111 + + return perm +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { + // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} diff --git a/src/rocker/build/testdata/Rockerfile_result b/src/rocker/build/testdata/Rockerfile_result deleted file mode 100644 index 03e89cf1..00000000 --- a/src/rocker/build/testdata/Rockerfile_result +++ /dev/null @@ -1,15 +0,0 @@ -(from "some-java8-image-dev:1") -(run "apt-get update && apt-get install -y nodejs npm && npm install -g bower && rm -rf /var/lib/apt/lists/*") -(run "echo \"{ \\\"allow_root\\\": true }\" > /root/.bowerrc") -(run "ln -sf /usr/bin/nodejs /usr/bin/node") -(add ["--user=john" "--ignore-mtime"] "." "/src") -(workdir "/src") -(onbuild (add "." 
"/")) -(mount "/root/.gradle") -(mount "$GIT_SSH_KEY:/root/.ssh/id_rsa") -(run "gradle --refresh-dependencies --stacktrace clean test") -(export "/src/corgi-app/build/distributions/app.tar") -(from "some-java8-image:1") -(import "app.tar" "/opt") -(cmd "/sbin/my_init" "/opt/app/bin/app") -(push "mycompany/app:$branch-$version") diff --git a/src/rocker/build/testdata/Rockerfile_string_result b/src/rocker/build/testdata/Rockerfile_string_result deleted file mode 100644 index badcb9e8..00000000 --- a/src/rocker/build/testdata/Rockerfile_string_result +++ /dev/null @@ -1,15 +0,0 @@ -from some-java8-image-dev:1 -run apt-get update && apt-get install -y nodejs npm && npm install -g bower && rm -rf /var/lib/apt/lists/* -run echo "{ \"allow_root\": true }" > /root/.bowerrc -run ln -sf /usr/bin/nodejs /usr/bin/node -add --user=john --ignore-mtime . /src -workdir /src -onbuild add [".","/"] -mount /root/.gradle -mount $GIT_SSH_KEY:/root/.ssh/id_rsa -run gradle --refresh-dependencies --stacktrace clean test -export /src/corgi-app/build/distributions/app.tar -from some-java8-image:1 -import app.tar /opt -cmd ["/sbin/my_init","/opt/app/bin/app"] -push mycompany/app:$branch-$version diff --git a/src/rocker/build/util.go b/src/rocker/build/util.go new file mode 100644 index 00000000..d91b5e5f --- /dev/null +++ b/src/rocker/build/util.go @@ -0,0 +1,66 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package build + +import ( + "crypto/md5" + "fmt" + "io" +) + +// mountsContainerName returns the name of volume container that will be used for a particular MOUNT +func (b *Build) mountsContainerName(path string) string { + // TODO: mounts are reused between different FROMs, is it ok? + mountID := b.getIdentifier() + ":" + path + return fmt.Sprintf("rocker_mount_%.6x", md5.Sum([]byte(mountID))) +} + +// exportsContainerName return the name of volume container that will be used for EXPORTs +func (b *Build) exportsContainerName() string { + mountID := b.getIdentifier() + return fmt.Sprintf("rocker_exports_%.6x", md5.Sum([]byte(mountID))) +} + +// getIdentifier returns the sequence that is unique to the current Rockerfile +func (b *Build) getIdentifier() string { + if b.cfg.ID != "" { + return b.cfg.ID + } + return b.cfg.ContextDir + ":" + b.rockerfile.Name +} + +// readerVoidCloser is a hack of the improved go-dockerclient's hijacking behavior +// It simply wraps io.Reader (os.Stdin in our case) and discards any Close() call. +// +// It's important because we don't want to close os.Stdin for two reasons: +// 1. We need to restore the terminal back from the raw mode after ATTACH +// 2. 
There can be other ATTACH instructions for which we need an open stdin
+//
+// See additional notes in the runContainerAttachStdin() function
+type readerVoidCloser struct {
+	reader io.Reader
+}
+
+// Read reads from the current reader
+func (r readerVoidCloser) Read(p []byte) (int, error) {
+	return r.reader.Read(p)
+}
+
+// Close is a void function, does nothing
+func (r readerVoidCloser) Close() error {
+	return nil
+}
diff --git a/src/rocker/debugtrap/debugtrap_unix.go b/src/rocker/debugtrap/debugtrap_unix.go
new file mode 100644
index 00000000..097ae854
--- /dev/null
+++ b/src/rocker/debugtrap/debugtrap_unix.go
@@ -0,0 +1,23 @@
+// +build !windows
+
+package debugtrap
+
+import (
+	"os"
+	"os/signal"
+	"syscall"
+
+	psignal "github.com/docker/docker/pkg/signal"
+)
+
+// SetupDumpStackTrap sets up a handler for SIGUSR1 and dumps
+// the goroutine stack trace to the INFO log
+func SetupDumpStackTrap() {
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, syscall.SIGUSR1)
+	go func() {
+		for range c {
+			psignal.DumpStacks()
+		}
+	}()
+}
diff --git a/src/rocker/debugtrap/debugtrap_unsupported.go b/src/rocker/debugtrap/debugtrap_unsupported.go
new file mode 100644
index 00000000..c640640a
--- /dev/null
+++ b/src/rocker/debugtrap/debugtrap_unsupported.go
@@ -0,0 +1,9 @@
+// +build !linux,!darwin,!freebsd
+
+package debugtrap
+
+// SetupDumpStackTrap sets up a handler for SIGUSR1 and dumps
+// the goroutine stack trace to the INFO log
+func SetupDumpStackTrap() {
+	return
+}
diff --git a/src/rocker/dockerclient/dockerclient.go b/src/rocker/dockerclient/dockerclient.go
index 901ac5c9..76d77cfa 100644
--- a/src/rocker/dockerclient/dockerclient.go
+++ b/src/rocker/dockerclient/dockerclient.go
@@ -25,6 +25,7 @@ import (
 	"os"
 	"strconv"
 	"strings"
+	"time"

 	"github.com/codegangsta/cli"
 	"github.com/fsouza/go-dockerclient"
@@ -100,6 +101,26 @@ func NewFromCli(c *cli.Context) (*docker.Client, error) {
 	return NewFromConfig(NewConfigFromCli(c))
 }

+// Ping pings the docker server, but with a timeout.
+// The problem is that for some reason it's impossible to set the
+// default timeout for the go-dockerclient Dialer; need to investigate
+func Ping(client *docker.Client, timeoutMs int) error {
+	var (
+		chErr   = make(chan error)
+		timeout = time.Duration(timeoutMs) * time.Millisecond
+	)
+	go func() {
+		chErr <- client.Ping()
+	}()
+	select {
+	case err := <-chErr:
+		return err
+	case <-time.After(timeout):
+		// TODO: can we kill the ping goroutine?
+		return fmt.Errorf("Failed to reach docker server, timeout %s", timeout)
+	}
+}
+
 // GlobalCliParams returns global params that configures docker client connection
 func GlobalCliParams() []cli.Flag {
 	return []cli.Flag{
diff --git a/src/rocker/dockerclient/dockerclient_test.go b/src/rocker/dockerclient/dockerclient_test.go
index d3966f40..ddd5e76d 100644
--- a/src/rocker/dockerclient/dockerclient_test.go
+++ b/src/rocker/dockerclient/dockerclient_test.go
@@ -1,3 +1,5 @@
+// +build integration
+
 /*-
  * Copyright 2015 Grammarly, Inc.
  *
diff --git a/src/rocker/imagename/artifact.go b/src/rocker/imagename/artifact.go
new file mode 100644
index 00000000..7b1dd732
--- /dev/null
+++ b/src/rocker/imagename/artifact.go
@@ -0,0 +1,62 @@
+/*-
+ * Copyright 2015 Grammarly, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package imagename
+
+import (
+	"fmt"
+	"strings"
+	"time"
+)
+
+// Artifact represents the artifact that is the result of an image build
+// It holds information about the pushed image and may be saved as a file
+type Artifact struct {
+	Name        *ImageName `yaml:"Name"`
+	Pushed      bool       `yaml:"Pushed"`
+	Tag         string     `yaml:"Tag"`
+	Digest      string     `yaml:"Digest"`
+	ImageID     string     `yaml:"ImageID"`
+	Addressable string     `yaml:"Addressable"`
+	BuildTime   time.Time  `yaml:"BuildTime"`
+}
+
+// Artifacts is a collection of Artifact entities
+type Artifacts struct {
+	RockerArtifacts []Artifact `yaml:"RockerArtifacts"`
+}
+
+// GetFileName constructs the base file name out of the image info
+func (a *Artifact) GetFileName() string {
+	imageName := strings.Replace(a.Name.Name, "/", "_", -1)
+	return fmt.Sprintf("%s_%s.yml", imageName, a.Name.GetTag())
+}
+
+// Len returns the number of artifacts in the collection
+func (a *Artifacts) Len() int {
+	return len(a.RockerArtifacts)
+}
+
+// Less returns true if the item at index i was created after the item at index j
+func (a *Artifacts) Less(i, j int) bool {
+	return a.RockerArtifacts[i].Name.Tag > a.RockerArtifacts[j].Name.Tag
+}
+
+// Swap swaps the items at indices i and j
+func (a *Artifacts) Swap(i, j int) {
+	a.RockerArtifacts[i], a.RockerArtifacts[j] = a.RockerArtifacts[j], a.RockerArtifacts[i]
+}
diff --git a/src/rocker/imagename/imagename.go b/src/rocker/imagename/imagename.go
index 3f6f2c7f..e021d863 100644
--- a/src/rocker/imagename/imagename.go
+++ b/src/rocker/imagename/imagename.go
@@ -24,7 +24,6 @@ import (
 	"sort"
 	"strings"

-	"github.com/go-yaml/yaml"
 	"github.com/wmark/semver"
 )

@@ -226,6 +225,11 @@ func (img ImageName) Contains(b *ImageName) bool {
 // ResolveVersion finds an applicable tag for current image among the list of available tags
 func (img *ImageName) ResolveVersion(list []*ImageName) (result *ImageName) {
 	for _, candidate := range list {
+		// If these are different images (different names/repos)
+		if !img.IsSameKind(*candidate) {
+			continue
+		}
+
 		// If we have a strict equality
 		if img.HasTag() && candidate.HasTag() && img.Tag == candidate.Tag {
 			return candidate
@@ -287,8 +291,8 @@ func (img *ImageName) UnmarshalYAML(unmarshal func(interface{}) error) error {
 }

 // MarshalYAML serializes ImageName to YAML string
-func (img ImageName) MarshalYAML() ([]byte, error) {
-	return yaml.Marshal(img.String())
+func (img ImageName) MarshalYAML() (interface{}, error) {
+	return img.String(), nil
 }

 // Tags is a structure used for cleaning images
diff --git a/src/rocker/imagename/imagename_test.go b/src/rocker/imagename/imagename_test.go
index e3374e6c..f9e8cf66 100644
--- a/src/rocker/imagename/imagename_test.go
+++ b/src/rocker/imagename/imagename_test.go
@@ -21,6 +21,7 @@ import (
 	"testing"
 	"time"

+	"github.com/go-yaml/yaml"
 	"github.com/kr/pretty"

 	"github.com/stretchr/testify/assert"
@@ -476,3 +477,18 @@ func TestTagsGetOld(t *testing.T) {
 	assert.Equal(t, "hub/ns/name:2", old[1].String(), "bad old image 2")
 	assert.Equal(t, "hub/ns/name:1", old[2].String(), "bad old image 3")
 }
+
+func TestImagename_ToYaml(t *testing.T) {
+	value := struct {
+		Name 
*ImageName + }{ + NewFromString("hub/ns/name:1"), + } + + data, err := yaml.Marshal(value) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, "name: hub/ns/name:1\n", string(data)) +} diff --git a/src/rocker/imagename/registry.go b/src/rocker/imagename/registry.go index a7bb17aa..f7967421 100644 --- a/src/rocker/imagename/registry.go +++ b/src/rocker/imagename/registry.go @@ -21,6 +21,7 @@ import ( "fmt" "io/ioutil" "net/http" + "strings" "github.com/fsouza/go-dockerclient" ) @@ -95,8 +96,13 @@ func RegistryListTags(image *ImageName) (images []*ImageName, err error) { // registryListTagsDockerHub lists image tags from hub.docker.com func registryListTagsDockerHub(image *ImageName) (images []*ImageName, err error) { + name := image.Name + if !strings.Contains(name, "/") { + name = "library/" + name + } + tg := registryTags{} - if err = registryGet(fmt.Sprintf("https://hub.docker.com/v2/repositories/library/%s/tags/?page_size=9999&page=1", image.Name), &tg); err != nil { + if err = registryGet(fmt.Sprintf("https://hub.docker.com/v2/repositories/%s/tags/?page_size=9999&page=1", name), &tg); err != nil { return } diff --git a/src/rocker/shellparser/LICENSE b/src/rocker/shellparser/LICENSE new file mode 100644 index 00000000..c7a3f0cf --- /dev/null +++ b/src/rocker/shellparser/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/src/rocker/shellparser/shellparser.go b/src/rocker/shellparser/shellparser.go new file mode 100644 index 00000000..0b7ccf20 --- /dev/null +++ b/src/rocker/shellparser/shellparser.go @@ -0,0 +1,247 @@ +// NOTICE: it was originally grabbed from the docker source +// without modifications; see LICENSE in the current +// directory from the license and the copyright. + +package shellparser + +// This will take a single word and an array of env variables and +// process all quotes (" and ') as well as $xxx and ${xxx} env variable +// tokens. Tries to mimic bash shell process. +// It doesn't support all flavors of ${xx:...} formats but new ones can +// be added by adding code to the "special ${} format processing" section + +import ( + "fmt" + "strings" + "unicode" +) + +type shellWord struct { + word string + envs []string + pos int +} + +// ProcessWord will use the 'env' list of environment variables, +// and replace any env var references in 'word'. 
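+//
+// A minimal usage sketch (env values assumed for illustration):
+//
+//	result, _ := ProcessWord("${GREETING:-hello} $USER", []string{"USER=john"})
+//	// result == "hello john"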
+func ProcessWord(word string, env []string) (string, error) { + sw := &shellWord{ + word: word, + envs: env, + pos: 0, + } + return sw.process() +} + +func (sw *shellWord) process() (string, error) { + return sw.processStopOn('\000') +} + +// Process the word, starting at 'pos', and stop when we get to the +// end of the word or the 'stopChar' character +func (sw *shellWord) processStopOn(stopChar rune) (string, error) { + var result string + var charFuncMapping = map[rune]func() (string, error){ + '\'': sw.processSingleQuote, + '"': sw.processDoubleQuote, + '$': sw.processDollar, + } + + for sw.pos < len(sw.word) { + ch := sw.peek() + if stopChar != '\000' && ch == stopChar { + sw.next() + break + } + if fn, ok := charFuncMapping[ch]; ok { + // Call special processing func for certain chars + tmp, err := fn() + if err != nil { + return "", err + } + result += tmp + } else { + // Not special, just add it to the result + ch = sw.next() + if ch == '\\' { + // '\' escapes, except end of line + ch = sw.next() + if ch == '\000' { + continue + } + } + result += string(ch) + } + } + + return result, nil +} + +func (sw *shellWord) peek() rune { + if sw.pos == len(sw.word) { + return '\000' + } + return rune(sw.word[sw.pos]) +} + +func (sw *shellWord) next() rune { + if sw.pos == len(sw.word) { + return '\000' + } + ch := rune(sw.word[sw.pos]) + sw.pos++ + return ch +} + +func (sw *shellWord) processSingleQuote() (string, error) { + // All chars between single quotes are taken as-is + // Note, you can't escape ' + var result string + + sw.next() + + for { + ch := sw.next() + if ch == '\000' || ch == '\'' { + break + } + result += string(ch) + } + return result, nil +} + +func (sw *shellWord) processDoubleQuote() (string, error) { + // All chars up to the next " are taken as-is, even ', except any $ chars + // But you can escape " with a \ + var result string + + sw.next() + + for sw.pos < len(sw.word) { + ch := sw.peek() + if ch == '"' { + sw.next() + break + } + if ch == '$' { + tmp, err := sw.processDollar() + if err != nil { + return "", err + } + result += tmp + } else { + ch = sw.next() + if ch == '\\' { + chNext := sw.peek() + + if chNext == '\000' { + // Ignore \ at end of word + continue + } + + if chNext == '"' || chNext == '$' { + // \" and \$ can be escaped, all other \'s are left as-is + ch = sw.next() + } + } + result += string(ch) + } + } + + return result, nil +} + +func (sw *shellWord) processDollar() (string, error) { + sw.next() + ch := sw.peek() + if ch == '{' { + sw.next() + name := sw.processName() + ch = sw.peek() + if ch == '}' { + // Normal ${xx} case + sw.next() + return sw.getEnv(name), nil + } + if ch == ':' { + // Special ${xx:...} format processing + // Yes it allows for recursive $'s in the ... 
spot + + sw.next() // skip over : + modifier := sw.next() + + word, err := sw.processStopOn('}') + if err != nil { + return "", err + } + + // Grab the current value of the variable in question so we + // can use to to determine what to do based on the modifier + newValue := sw.getEnv(name) + + switch modifier { + case '+': + if newValue != "" { + newValue = word + } + return newValue, nil + + case '-': + if newValue == "" { + newValue = word + } + return newValue, nil + + default: + return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word) + } + } + return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word) + } + // $xxx case + name := sw.processName() + if name == "" { + return "$", nil + } + return sw.getEnv(name), nil +} + +func (sw *shellWord) processName() string { + // Read in a name (alphanumeric or _) + // If it starts with a numeric then just return $# + var name string + + for sw.pos < len(sw.word) { + ch := sw.peek() + if len(name) == 0 && unicode.IsDigit(ch) { + ch = sw.next() + return string(ch) + } + if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' { + break + } + ch = sw.next() + name += string(ch) + } + + return name +} + +func (sw *shellWord) getEnv(name string) string { + for _, env := range sw.envs { + i := strings.Index(env, "=") + if i < 0 { + if name == env { + // Should probably never get here, but just in case treat + // it like "var" and "var=" are the same + return "" + } + continue + } + if name != env[:i] { + continue + } + return env[i+1:] + } + return "" +} diff --git a/src/rocker/shellparser/shellparser_test.go b/src/rocker/shellparser/shellparser_test.go new file mode 100644 index 00000000..e2ce5064 --- /dev/null +++ b/src/rocker/shellparser/shellparser_test.go @@ -0,0 +1,55 @@ +// NOTICE: it was originally grabbed from the docker source +// without modifications; see LICENSE in the current +// directory from the license and the copyright. + +package shellparser + +import ( + "bufio" + "os" + "strings" + "testing" +) + +func TestShellParser(t *testing.T) { + file, err := os.Open("testdata/words") + if err != nil { + t.Fatalf("Can't open 'words': %s", err) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + envs := []string{"PWD=/home", "SHELL=bash"} + for scanner.Scan() { + line := scanner.Text() + + // Trim comments and blank lines + i := strings.Index(line, "#") + if i >= 0 { + line = line[:i] + } + line = strings.TrimSpace(line) + + if line == "" { + continue + } + + words := strings.Split(line, "|") + if len(words) != 2 { + t.Fatalf("Error in 'words' - should be 2 words:%q", words) + } + + words[0] = strings.TrimSpace(words[0]) + words[1] = strings.TrimSpace(words[1]) + + newWord, err := ProcessWord(words[0], envs) + + if err != nil { + newWord = "error" + } + + if newWord != words[1] { + t.Fatalf("Error. Src: %s Calc: %s Expected: %s", words[0], newWord, words[1]) + } + } +} diff --git a/src/rocker/shellparser/testdata/words b/src/rocker/shellparser/testdata/words new file mode 100644 index 00000000..1114a7e4 --- /dev/null +++ b/src/rocker/shellparser/testdata/words @@ -0,0 +1,58 @@ +hello | hello +he'll'o | hello +he'llo | hello +he\'llo | he'llo +he\\'llo | he\llo +abc\tdef | abctdef +"abc\tdef" | abc\tdef +'abc\tdef' | abc\tdef +hello\ | hello +hello\\ | hello\ +"hello | hello +"hello\" | hello" +"hel'lo" | hel'lo +'hello | hello +'hello\' | hello\ +"''" | '' +$. | $. +$1 | +he$1x | hex +he$.x | he$.x +he$pwd. | he. 
+he$PWD | he/home
+he\$PWD | he$PWD
+he\\$PWD | he\/home
+he\${} | he${}
+he\${}xx | he${}xx
+he${} | he
+he${}xx | hexx
+he${hi} | he
+he${hi}xx | hexx
+he${PWD} | he/home
+he${.} | error
+he${XXX:-000}xx | he000xx
+he${PWD:-000}xx | he/homexx
+he${XXX:-$PWD}xx | he/homexx
+he${XXX:-${PWD:-yyy}}xx | he/homexx
+he${XXX:-${YYY:-yyy}}xx | heyyyxx
+he${XXX:YYY} | error
+he${XXX:+${PWD}}xx | hexx
+he${PWD:+${XXX}}xx | hexx
+he${PWD:+${SHELL}}xx | hebashxx
+he${XXX:+000}xx | hexx
+he${PWD:+000}xx | he000xx
+'he${XX}' | he${XX}
+"he${PWD}" | he/home
+"he'$PWD'" | he'/home'
+"$PWD" | /home
+'$PWD' | $PWD
+'\$PWD' | \$PWD
+'"hello"' | "hello"
+he\$PWD | he$PWD
+"he\$PWD" | he$PWD
+'he\$PWD' | he\$PWD
+he${PWD | error
+he${PWD:=000}xx | error
+he${PWD:+${PWD}:}xx | he/home:xx
+he${XXX:-\$PWD:}xx | he$PWD:xx
+he${XXX:-\${PWD}z}xx | he${PWDz}xx
diff --git a/src/rocker/template/README.md b/src/rocker/template/README.md
index 63824297..f7cf4bf3 100644
--- a/src/rocker/template/README.md
+++ b/src/rocker/template/README.md
@@ -153,6 +153,40 @@ If the `Version` variable is not given, then template processing will fail with
 Error executing template TEMPLATE_NAME, error: template: TEMPLATE_NAME:1:3: executing \"TEMPLATE_NAME\" at : error calling assert: Assertion failed
 ```
 
+### {{ image *docker_image_name_with_tag* }} or {{ image *docker_image_name* *tag* }}
+Helper that substitutes image versions derived from build artifacts *(TODO: link to artifacts doc)*.
+
+Example:
+```Dockerfile
+FROM {{ image "ubuntu" }}
+# OR
+FROM {{ image "ubuntu:latest" }}
+# OR
+FROM {{ image "ubuntu" "latest" }}
+```
+
+Without any additional arguments it resolves into this:
+```Dockerfile
+FROM ubuntu:latest
+```
+
+But if you have an artifact that resulted from a previous rocker build and was fed back to rocker as a variable, the artifact will be substituted:
+```yaml
+# shortened version of an artifact produced by rocker
+RockerArtifacts:
+- Name: ubuntu:latest
+  Digest: sha256:ead434cd278824865d6e3b67e5d4579ded02eb2e8367fc165efa21138b225f11
+```
+
+```Dockerfile
+# rocker build -vars artifacts/*
+FROM ubuntu@sha256:ead434cd278824865d6e3b67e5d4579ded02eb2e8367fc165efa21138b225f11
+```
+
+This feature is useful when you have a continuous integration pipeline and you want to build images on top of each other with guaranteed immutability. This trick can also be used with [rocker-compose](https://github.com/grammarly/rocker-compose) to run image versions derived from the artifacts.
+
+*TODO: also describe semver matching behavior*
+
 # Variables
 `rocker/template` automatically populates [os.Environ](https://golang.org/pkg/os/#Environ) to the template along with the variables that are passed from the outside. All environment variables are available under `.Env`.
diff --git a/src/rocker/template/template.go b/src/rocker/template/template.go
index 1681b118..70db1d61 100644
--- a/src/rocker/template/template.go
+++ b/src/rocker/template/template.go
@@ -24,17 +24,24 @@ import (
 	"io/ioutil"
 	"os"
 	"reflect"
+	"rocker/imagename"
+	"sort"
 	"strconv"
 	"strings"
 	"text/template"

 	"github.com/go-yaml/yaml"
 	"github.com/kr/pretty"
+
+	log "github.com/Sirupsen/logrus"
 )

+// Funs is the list of additional helpers that may be given to the template
+type Funs map[string]interface{}
+
 // Process renders config through the template processor.
 // vars and additional functions are acceptable.
-func Process(name string, reader io.Reader, vars Vars, funcs map[string]interface{}) (*bytes.Buffer, error) { +func Process(name string, reader io.Reader, vars Vars, funs Funs) (*bytes.Buffer, error) { var buf bytes.Buffer // read template @@ -58,6 +65,7 @@ func Process(name string, reader io.Reader, vars Vars, funcs map[string]interfac "json": jsonFn, "shell": EscapeShellarg, "yaml": yamlFn, + "image": makeImageHelper(vars), // `image` helper needs to make a closure on Vars // strings functions "compare": strings.Compare, @@ -89,7 +97,7 @@ func Process(name string, reader io.Reader, vars Vars, funcs map[string]interfac "trimSpace": strings.TrimSpace, "trimSuffix": strings.TrimSuffix, } - for k, f := range funcs { + for k, f := range funs { funcMap[k] = f } @@ -236,6 +244,70 @@ func indent(prefix, s string) string { return strings.Join(res, "\n") } +func makeImageHelper(vars Vars) func(string, ...string) (string, error) { + // Sort artifacts so we match semver on latest item + var ( + artifacts = &imagename.Artifacts{} + ok bool + ) + + if artifacts.RockerArtifacts, ok = vars["RockerArtifacts"].([]imagename.Artifact); !ok { + artifacts.RockerArtifacts = []imagename.Artifact{} + } + + sort.Sort(artifacts) + + log.Debugf("`image` helper got artifacts: %# v", pretty.Formatter(artifacts)) + + return func(img string, args ...string) (string, error) { + var ( + matched bool + ok bool + shouldMatch bool + image = imagename.NewFromString(img) + ) + + if len(args) > 0 { + image = imagename.New(img, args[0]) + } + + for _, a := range artifacts.RockerArtifacts { + if !image.IsSameKind(*a.Name) { + continue + } + + if image.HasVersionRange() { + if !image.Contains(a.Name) { + log.Debugf("Skipping artifact %s because it is not suitable for %s", a.Name, image) + continue + } + } else if image.GetTag() != a.Name.GetTag() { + log.Debugf("Skipping artifact %s because it is not suitable for %s", a.Name, image) + continue + } + + if a.Digest != "" { + log.Infof("Apply artifact digest %s for image %s", a.Digest, image) + image.SetTag(a.Digest) + matched = true + break + } + if a.Name.HasTag() { + log.Infof("Apply artifact tag %s for image %s", a.Name.GetTag(), image) + image.SetTag(a.Name.GetTag()) + matched = true + break + } + } + + if shouldMatch, ok = vars["DemandArtifacts"].(bool); ok && shouldMatch && !matched { + return "", fmt.Errorf("Cannot find suitable artifact for image %s", image) + } + + return image.String(), nil + } +} + func interfaceToInt(v interface{}) (int, error) { switch v.(type) { case int: diff --git a/src/rocker/template/template_test.go b/src/rocker/template/template_test.go index 837dfc0f..5d563a53 100644 --- a/src/rocker/template/template_test.go +++ b/src/rocker/template/template_test.go @@ -19,6 +19,7 @@ package template import ( "fmt" "os" + "rocker/imagename" "strings" "testing" @@ -32,6 +33,27 @@ var ( "data": map[string]string{ "foo": "bar", }, + "RockerArtifacts": []imagename.Artifact{ + imagename.Artifact{ + Name: imagename.NewFromString("alpine:3.2"), + Tag: "3.2", + }, + imagename.Artifact{ + Name: imagename.NewFromString("golang:1.5"), + Tag: "1.5", + Digest: "sha256:ead434", + }, + imagename.Artifact{ + Name: imagename.NewFromString("data:master"), + Tag: "master", + Digest: "sha256:fafe14", + }, + imagename.Artifact{ + Name: imagename.NewFromString("ssh:latest"), + Tag: "latest", + Digest: "sha256:ba41cd", + }, + }, } ) @@ -120,6 +142,77 @@ func TestProcess_YamlIndent(t *testing.T) { assert.Equal(t, "key:\n foo: bar\n", processTemplate(t, "key:\n{{ .data | yaml 1 
}}"))
 }
 
+func TestProcess_Image_Simple(t *testing.T) {
+	tests := []struct {
+		tpl     string
+		result  string
+		message string
+	}{
+		{"{{ image `debian:7.7` }}", "debian:7.7", "should not alter the tag that is not in artifacts"},
+		{"{{ image `debian` `7.7` }}", "debian:7.7", "should be possible to specify tag as a separate argument"},
+		{"{{ image `debian` `sha256:afa` }}", "debian@sha256:afa", "should be possible to specify digest as a separate argument"},
+	}
+
+	for _, test := range tests {
+		assert.Equal(t, test.result, processTemplate(t, test.tpl), test.message)
+	}
+}
+
+func TestProcess_Image_Advanced(t *testing.T) {
+	tests := []struct {
+		in          string
+		result      string
+		shouldMatch bool
+		message     string
+	}{
+		{"debian:7.7", "debian:7.7", false, "should not alter the tag that is not in artifacts"},
+		{"debian:7.*", "debian:7.*", false, "should not alter the semver tag that is not in artifacts"},
+		{"debian", "debian:latest", false, "should not match anything when no tag given (:latest) and no artifact"},
+		{"alpine:3.1", "alpine:3.1", false, "should not match artifact with different version"},
+		{"alpine:4.1", "alpine:4.1", false, "should not match artifact with different version"},
+		{"alpine:3.*", "alpine:3.2", true, "should match artifact with version wildcard"},
+		{"alpine", "alpine:latest", false, "should not match artifact when no tag given (:latest by default)"},
+		{"alpine:latest", "alpine:latest", false, "should not match on a :latest tag"},
+		{"alpine:snapshot", "alpine:snapshot", false, "should not match on a named tag"},
+		{"golang:1.5", "golang@sha256:ead434", true, "should match semver tag and use digest"},
+		{"golang:1.*", "golang@sha256:ead434", true, "should match on wildcard semver tag and use digest"},
+		{"golang:1", "golang@sha256:ead434", true, "should match on prefix semver tag and use digest"},
+		{"golang:1.4", "golang:1.4", false, "should not match on different semver tag"},
+		{"golang:master", "golang:master", false, "should not match on a named tag"},
+		{"data:1.2", "data:1.2", false, "should not match on a version tag against named artifact"},
+		{"data:snapshot", "data:snapshot", false, "should not match on a different named tag against named artifact"},
+		{"data:master", "data@sha256:fafe14", true, "should match on a same named tag against named artifact"},
+		{"ssh:latest", "ssh@sha256:ba41cd", true, "should match on a :latest tag against :latest artifact"},
+		{"ssh", "ssh@sha256:ba41cd", true, "should match on untagged image against :latest artifact"},
+		{"ssh:master", "ssh:master", false, "should not match with other tag against :latest artifact"},
+		{"ssh:1.2", "ssh:1.2", false, "should not match with semver tag against :latest artifact"},
+	}
+
+	for _, test := range tests {
+		tpl := fmt.Sprintf("{{ image `%s` }}", test.in)
+		assert.Equal(t, test.result, processTemplate(t, tpl), test.message)
+	}
+
+	// Now test the same but with DemandArtifacts on
+	configTemplateVars["DemandArtifacts"] = true
+	defer func() {
+		configTemplateVars["DemandArtifacts"] = false
+	}()
+
+	for _, test := range tests {
+		tpl := fmt.Sprintf("{{ image `%s` }}", test.in)
+		if test.shouldMatch {
+			assert.Equal(t, test.result, processTemplate(t, tpl), test.message)
+		} else {
+			err := processTemplateReturnError(t, tpl)
+			assert.Error(t, err, fmt.Sprintf("should give an error for test case: %s", test.message))
+			if err != nil {
+				assert.Contains(t, err.Error(), fmt.Sprintf("Cannot find suitable artifact for image %s", test.in), test.message)
+			}
+		}
+	}
+}
+
 func processTemplate(t *testing.T,
tpl string) string { result, err := Process("test", strings.NewReader(tpl), configTemplateVars, map[string]interface{}{}) if err != nil { @@ -127,3 +220,8 @@ func processTemplate(t *testing.T, tpl string) string { } return result.String() } + +func processTemplateReturnError(t *testing.T, tpl string) error { + _, err := Process("test", strings.NewReader(tpl), configTemplateVars, map[string]interface{}{}) + return err +} diff --git a/src/rocker/template/vars.go b/src/rocker/template/vars.go index 9231c47d..09bf9c9a 100644 --- a/src/rocker/template/vars.go +++ b/src/rocker/template/vars.go @@ -23,11 +23,15 @@ import ( "os" "path" "path/filepath" + "reflect" "regexp" + "rocker/imagename" "sort" "strings" "github.com/go-yaml/yaml" + + log "github.com/Sirupsen/logrus" ) // Vars describes the data structure of the build variables @@ -37,7 +41,16 @@ type Vars map[string]interface{} func (vars Vars) Merge(varsList ...Vars) Vars { for _, mergeWith := range varsList { for k, v := range mergeWith { - vars[k] = v + // We want to merge slices of the same type by appending them to each other + // instead of overwriting + rv1 := reflect.ValueOf(vars[k]) + rv2 := reflect.ValueOf(v) + + if rv1.Kind() == reflect.Slice && rv2.Kind() == reflect.Slice && rv1.Type() == rv2.Type() { + vars[k] = reflect.AppendSlice(rv1, rv2).Interface() + } else { + vars[k] = v + } } } return vars @@ -91,6 +104,29 @@ func (vars *Vars) UnmarshalJSON(data []byte) (err error) { return nil } +// UnmarshalYAML parses YAML string and returns Vars +func (vars *Vars) UnmarshalYAML(unmarshal func(interface{}) error) (err error) { + // try unmarshal RockerArtifacts type + var artifacts imagename.Artifacts + if err = unmarshal(&artifacts); err != nil { + return err + } + + var value map[string]interface{} + if err = unmarshal(&value); err != nil { + return err + } + + // Fill artifacts if present + if len(artifacts.RockerArtifacts) > 0 { + value["RockerArtifacts"] = artifacts.RockerArtifacts + } + + *vars = value + + return nil +} + // VarsFromStrings parses Vars through ParseKvPairs and then loads content from files // for vars values with "@" prefix func VarsFromStrings(pairs []string) (vars Vars, err error) { @@ -117,6 +153,7 @@ func VarsFromStrings(pairs []string) (vars Vars, err error) { // VarsFromFile reads variables from either JSON or YAML file func VarsFromFile(filename string) (vars Vars, err error) { + log.Debugf("Load vars from file %s", filename) if filename, err = resolveFileName(filename); err != nil { return nil, err @@ -144,13 +181,31 @@ func VarsFromFile(filename string) (vars Vars, err error) { } // VarsFromFileMulti reads multiple files and merge vars -func VarsFromFileMulti(files []string) (vars Vars, err error) { - varsList := make([]Vars, len(files)) - for i, f := range files { - if varsList[i], err = VarsFromFile(f); err != nil { - return nil, err +func VarsFromFileMulti(files []string) (Vars, error) { + var ( + varsList = []Vars{} + matches []string + vars Vars + err error + ) + + for _, pat := range files { + matches = []string{pat} + + if containsWildcards(pat) { + if matches, err = filepath.Glob(pat); err != nil { + return nil, err + } + } + + for _, f := range matches { + if vars, err = VarsFromFile(f); err != nil { + return nil, err + } + varsList = append(varsList, vars) } } + return Vars{}.Merge(varsList...), nil } @@ -225,3 +280,15 @@ func (vars Vars) ReplaceString(str string) string { return str } + +func containsWildcards(name string) bool { + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == 
'\\' { + i++ + } else if ch == '*' || ch == '?' || ch == '[' { + return true + } + } + return false +} diff --git a/src/rocker/template/vars_test.go b/src/rocker/template/vars_test.go index 6408cf37..4fdcca07 100644 --- a/src/rocker/template/vars_test.go +++ b/src/rocker/template/vars_test.go @@ -22,12 +22,25 @@ import ( "io/ioutil" "os" "path" + "rocker/imagename" "rocker/test" "testing" "github.com/stretchr/testify/assert" ) +func TestVars_MergeSlices(t *testing.T) { + v1 := Vars{ + "fruits": []string{"banana", "apple"}, + } + v2 := Vars{ + "fruits": []string{"pear", "orange"}, + } + v3 := v1.Merge(v2) + + assert.Equal(t, []string{"banana", "apple", "pear", "orange"}, v3["fruits"].([]string)) +} + func TestVarsToStrings(t *testing.T) { t.Parallel() @@ -100,6 +113,8 @@ func TestVarsFromStrings(t *testing.T) { } } +// TODO: test VarsFromFileMulti + func TestVarsFromFile_Yaml(t *testing.T) { tempDir, rm := tplMkFiles(t, map[string]string{ "vars.yml": ` @@ -118,6 +133,28 @@ Bar: yes assert.Equal(t, true, vars["Bar"]) } +func TestVarsFromFile_Yaml_Artifacts(t *testing.T) { + tempDir, rm := tplMkFiles(t, map[string]string{ + "vars.yml": ` +Foo: x +Bar: yes +RockerArtifacts: +- Name: golang:1.5 + Tag: "1.5" +`, + }) + defer rm() + + vars, err := VarsFromFile(tempDir + "/vars.yml") + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, "x", vars["Foo"]) + assert.Equal(t, true, vars["Bar"]) + assert.IsType(t, []imagename.Artifact{}, vars["RockerArtifacts"]) +} + func TestVarsFromFile_Json(t *testing.T) { tempDir, rm := tplMkFiles(t, map[string]string{ "vars.json": ` diff --git a/src/rocker/textformatter/textformatter.go b/src/rocker/textformatter/textformatter.go new file mode 100644 index 00000000..18c74c77 --- /dev/null +++ b/src/rocker/textformatter/textformatter.go @@ -0,0 +1,165 @@ +// The MIT License (MIT) +// Copyright (c) 2014 Simon Eskildsen +// NOTE: modified to support no-color mode that is more human readable + +package textformatter + +import ( + "bytes" + "fmt" + "runtime" + "sort" + "strings" + "time" + + log "github.com/Sirupsen/logrus" +) + +const ( + nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 34 + gray = 37 +) + +var ( + baseTimestamp time.Time + isTerminal bool +) + +func init() { + baseTimestamp = time.Now() + isTerminal = log.IsTerminal() +} + +func miniTS() int { + return int(time.Since(baseTimestamp) / time.Second) +} + +// TextFormatter is a formatter for logrus that can print colored and uncolored human readable log messages +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. 
+	DisableSorting bool
+}
+
+// Format formats the log message; it checks whether the output should be
+// colored and does the formatting accordingly
+func (f *TextFormatter) Format(entry *log.Entry) ([]byte, error) {
+	var keys = make([]string, 0, len(entry.Data))
+	for k := range entry.Data {
+		keys = append(keys, k)
+	}
+
+	if !f.DisableSorting {
+		sort.Strings(keys)
+	}
+
+	b := &bytes.Buffer{}
+
+	prefixFieldClashes(entry.Data)
+
+	isColorTerminal := isTerminal && (runtime.GOOS != "windows")
+	isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
+
+	if f.TimestampFormat == "" {
+		f.TimestampFormat = log.DefaultTimestampFormat
+	}
+	if isColored {
+		f.printColored(b, entry, keys)
+	} else {
+		f.printUncolored(b, entry, keys)
+	}
+
+	b.WriteByte('\n')
+	return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *log.Entry, keys []string) {
+	var levelColor int
+	switch entry.Level {
+	case log.DebugLevel:
+		levelColor = gray
+	case log.WarnLevel:
+		levelColor = yellow
+	case log.ErrorLevel, log.FatalLevel, log.PanicLevel:
+		levelColor = red
+	default:
+		levelColor = blue
+	}
+
+	levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+	if !f.FullTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+	} else {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message)
+	}
+	for _, k := range keys {
+		v := entry.Data[k]
+		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
+	}
+}
+
+func (f *TextFormatter) printUncolored(b *bytes.Buffer, entry *log.Entry, keys []string) {
+	levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+	if !f.FullTimestamp {
+		fmt.Fprintf(b, "%s[%04d] %-44s ", levelText, miniTS(), entry.Message)
+	} else {
+		fmt.Fprintf(b, "%s[%s] %-44s ", levelText, entry.Time.Format(f.TimestampFormat), entry.Message)
+	}
+	for _, k := range keys {
+		v := entry.Data[k]
+		fmt.Fprintf(b, " %s=%+v", k, v)
+	}
+}
+
+// This is to not silently overwrite `time`, `msg` and `level` fields when
+// dumping it. If this code wasn't there, doing:
+//
+//	logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code
+// it'll be logged as:
+//
+//	{"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data log.Fields) { + _, ok := data["time"] + if ok { + data["fields.time"] = data["time"] + } + + _, ok = data["msg"] + if ok { + data["fields.msg"] = data["msg"] + } + + _, ok = data["level"] + if ok { + data["fields.level"] = data["level"] + } +} diff --git a/src/rocker/util/filepath.go b/src/rocker/util/filepath.go index 346138e3..2e02d92c 100644 --- a/src/rocker/util/filepath.go +++ b/src/rocker/util/filepath.go @@ -18,7 +18,10 @@ package util import ( "fmt" + "os" + "os/user" "path" + "path/filepath" "strings" ) @@ -43,3 +46,33 @@ func ResolvePath(baseDir, subPath string) (resultPath string, err error) { return resultPath, nil } + +// MakeAbsolute makes any path absolute, either according to a HOME or from a working directory +func MakeAbsolute(path string) (result string, err error) { + result = filepath.Clean(path) + if filepath.IsAbs(result) { + return result, nil + } + + if strings.HasPrefix(result, "~/") || result == "~" { + home := os.Getenv("HOME") + + // fallback to system user info + if home == "" { + usr, err := user.Current() + if err != nil { + return "", err + } + home = usr.HomeDir + } + + return home + result[1:], nil + } + + wd, err := os.Getwd() + if err != nil { + return "", err + } + + return filepath.Join(wd, path), nil +} diff --git a/vendor/manifest b/vendor/manifest index db56ad21..2864ed02 100644 --- a/vendor/manifest +++ b/vendor/manifest @@ -96,6 +96,54 @@ "revision": "c9ad0ce23f68428421adfc6ced9e6123f54788a5", "branch": "master" }, + { + "importpath": "github.com/stretchr/objx", + "repository": "https://github.com/stretchr/objx", + "revision": "cbeaeb16a013161a98496fad62933b1d21786672", + "branch": "master" + }, + { + "importpath": "github.com/docker/docker/pkg/urlutil", + "repository": "https://github.com/docker/docker", + "revision": "148be8bd7efd2cdb74b0cd9466fccb57c4c51834", + "branch": "master", + "path": "/pkg/urlutil" + }, + { + "importpath": "github.com/docker/docker/pkg/httputils", + "repository": "https://github.com/docker/docker", + "revision": "148be8bd7efd2cdb74b0cd9466fccb57c4c51834", + "branch": "master", + "path": "/pkg/httputils" + }, + { + "importpath": "github.com/docker/docker/pkg/tarsum", + "repository": "https://github.com/docker/docker", + "revision": "148be8bd7efd2cdb74b0cd9466fccb57c4c51834", + "branch": "master", + "path": "/pkg/tarsum" + }, + { + "importpath": "github.com/docker/docker/pkg/nat", + "repository": "https://github.com/docker/docker", + "revision": "b0dc11127ef4fc20261ccc0db03a16b17f7f91c4", + "branch": "master", + "path": "/pkg/nat" + }, + { + "importpath": "github.com/docker/docker/pkg/parsers", + "repository": "https://github.com/docker/docker", + "revision": "b0dc11127ef4fc20261ccc0db03a16b17f7f91c4", + "branch": "master", + "path": "/pkg/parsers" + }, + { + "importpath": "github.com/docker/docker/pkg/fileutils", + "repository": "https://github.com/docker/docker", + "revision": "02ae137b1d309729c32110aac6e315e798ba4f0e", + "branch": "master", + "path": "/pkg/fileutils" + }, { "importpath": "github.com/mitchellh/go-homedir", "repository": "https://github.com/mitchellh/go-homedir", @@ -103,4 +151,4 @@ "branch": "master" } ] -} \ No newline at end of file +} diff --git a/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils.go new file mode 100644 index 00000000..08b9840c --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -0,0 +1,184 @@ +package fileutils + +import ( + "errors" + "fmt" + 
"io" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" +) + +// exclusion return true if the specified pattern is an exclusion +func exclusion(pattern string) bool { + return pattern[0] == '!' +} + +// empty return true if the specified pattern is empty +func empty(pattern string) bool { + return pattern == "" +} + +// CleanPatterns takes a slice of patterns returns a new +// slice of patterns cleaned with filepath.Clean, stripped +// of any empty patterns and lets the caller know whether the +// slice contains any exception patterns (prefixed with !). +func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { + // Loop over exclusion patterns and: + // 1. Clean them up. + // 2. Indicate whether we are dealing with any exception rules. + // 3. Error if we see a single exclusion marker on it's own (!). + cleanedPatterns := []string{} + patternDirs := [][]string{} + exceptions := false + for _, pattern := range patterns { + // Eliminate leading and trailing whitespace. + pattern = strings.TrimSpace(pattern) + if empty(pattern) { + continue + } + if exclusion(pattern) { + if len(pattern) == 1 { + return nil, nil, false, errors.New("Illegal exclusion pattern: !") + } + exceptions = true + } + pattern = filepath.Clean(pattern) + cleanedPatterns = append(cleanedPatterns, pattern) + if exclusion(pattern) { + pattern = pattern[1:] + } + patternDirs = append(patternDirs, strings.Split(pattern, "/")) + } + + return cleanedPatterns, patternDirs, exceptions, nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + patterns, patDirs, _, err := CleanPatterns(patterns) + if err != nil { + return false, err + } + + return OptimizedMatches(file, patterns, patDirs) +} + +// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. +// It will assume that the inputs have been preprocessed and therefore the function +// doen't need to do as much error checking and clean-up. This was done to avoid +// repeating these steps on each file being checked during the archive process. +// The more generic fileutils.Matches() can't make these assumptions. +func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { + matched := false + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, "/") + + for i, pattern := range patterns { + negative := false + + if exclusion(pattern) { + negative = true + pattern = pattern[1:] + } + + match, err := filepath.Match(pattern, file) + if err != nil { + return false, err + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + if len(patDirs[i]) <= len(parentPathDirs) { + match, _ = filepath.Match(strings.Join(patDirs[i], "/"), + strings.Join(parentPathDirs[:len(patDirs[i])], "/")) + } + } + + if match { + matched = !negative + } + } + + if matched { + logrus.Debugf("Skipping excluded path: %s", file) + } + + return matched, nil +} + +// CopyFile copies from src to dst until either EOF is reached +// on src or an error occurs. It verifies src exists and remove +// the dst if it exists. 
+func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. +// The target of the symbolic link may not be a file. +func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil +} + +// CreateIfNotExists creates a file or a directory only if it does not already exist. +func CreateIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0755) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_test.go b/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_test.go new file mode 100644 index 00000000..b544ffbf --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_test.go @@ -0,0 +1,402 @@ +package fileutils + +import ( + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" +) + +// CopyFile with invalid src +func TestCopyFileWithInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest")) + if err == nil { + t.Fatal("Should have fail to copy an invalid src file") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with invalid dest +func TestCopyFileWithInvalidDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "file") + err = ioutil.WriteFile(src, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path")) + if err == nil { + t.Fatal("Should have fail to copy an invalid src file") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with same src and dest +func TestCopyFileWithSameSrcAndDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + file := path.Join(tempFolder, "file") + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, file) + if err != nil { + 
t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +// CopyFile with same src and dest but path is different and not clean +func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + testFolder := path.Join(tempFolder, "test") + err = os.MkdirAll(testFolder, 0740) + if err != nil { + t.Fatal(err) + } + file := path.Join(testFolder, "file") + sameFile := testFolder + "/../test/file" + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, sameFile) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +func TestCopyFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "src") + dest := path.Join(tempFolder, "dest") + ioutil.WriteFile(src, []byte("content"), 0777) + ioutil.WriteFile(dest, []byte("destContent"), 0777) + bytes, err := CopyFile(src, dest) + if err != nil { + t.Fatal(err) + } + if bytes != 7 { + t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes) + } + actual, err := ioutil.ReadFile(dest) + if err != nil { + t.Fatal(err) + } + if string(actual) != "content" { + t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content") + } +} + +// Reading a symlink to a directory must return the directory +func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { + var err error + if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { + t.Errorf("failed to create directory: %s", err) + } + + if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { + t.Fatalf("failed to read symlink to directory: %s", err) + } + + if path != "/tmp/testReadSymlinkToExistingDirectory" { + t.Fatalf("symlink returned unexpected directory: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { + t.Errorf("failed to remove temporary directory: %s", err) + } + + if err = os.Remove("/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +// Reading a non-existing symlink must fail +func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { + var path string + var err error + if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { + t.Fatalf("error expected for non-existing symlink") + } + + if path != "" { + t.Fatalf("expected empty path, but '%s' was returned", path) + } +} + +// Reading a symlink to a file must fail +func TestReadSymlinkedDirectoryToFile(t *testing.T) { + var err error + var file *os.File + + if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + file.Close() + + if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { + t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") + } + + if path != "" { + t.Fatalf("path 
should've been empty: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { + t.Errorf("failed to remove file: %s", err) + } + + if err = os.Remove("/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +func TestWildcardMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*"}) + if match != true { + t.Errorf("failed to get a wildcard match, got %v", match) + } +} + +// A simple pattern match should return true. +func TestPatternMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go"}) + if match != true { + t.Errorf("failed to get a match, got %v", match) + } +} + +// An exclusion followed by an inclusion should return true. +func TestExclusionPatternMatchesPatternBefore(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) + if match != true { + t.Errorf("failed to get true match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A pattern followed by an exclusion should return false. +func TestExclusionPatternMatchesPatternAfter(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"}) + if match != false { + t.Errorf("failed to get false match on exclusion pattern, got %v", match) + } +} + +// A filename evaluating to . should return false. +func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { + match, _ := Matches(".", []string{"*.go"}) + if match != false { + t.Errorf("failed to get false match on ., got %v", match) + } +} + +// A single ! pattern should return an error. +func TestSingleExclamationError(t *testing.T) { + _, err := Matches("fileutils.go", []string{"!"}) + if err == nil { + t.Errorf("failed to get an error for a single exclamation point, got %v", err) + } +} + +// A string preceded with a ! should return true from Exclusion. 
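+// (e.g. exclusion("!docs") is true, while exclusion("docs") is false)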
+func TestExclusion(t *testing.T) { + exclusion := exclusion("!") + if !exclusion { + t.Errorf("failed to get true for a single !, got %v", exclusion) + } +} + +// Matches with no patterns +func TestMatchesWithNoPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{}) + if err != nil { + t.Fatal(err) + } + if matches { + t.Fatalf("Should not have match anything") + } +} + +// Matches with malformed patterns +func TestMatchesWithMalformedPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{"["}) + if err == nil { + t.Fatal("Should have failed because of a malformed syntax in the pattern") + } + if matches { + t.Fatalf("Should not have match anything") + } +} + +// An empty string should return true from Empty. +func TestEmpty(t *testing.T) { + empty := empty("") + if !empty { + t.Errorf("failed to get true for an empty string, got %v", empty) + } +} + +func TestCleanPatterns(t *testing.T) { + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"}) + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsStripEmptyPatterns(t *testing.T) { + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""}) + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsExceptionFlag(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsErrorSingleException(t *testing.T) { + _, _, _, err := CleanPatterns([]string{"!"}) + if err == nil { + t.Errorf("expected error on single exclamation point, got %v", err) + } +} + +func TestCleanPatternsFolderSplit(t *testing.T) { + _, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"}) + if dirs[0][0] != "docs" { + t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][1]) + } + if dirs[0][1] != "config" { + t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1]) + } +} + +func TestCreateIfNotExistsDir(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + + folderToCreate := filepath.Join(tempFolder, "tocreate") + + if err := CreateIfNotExists(folderToCreate, true); err != nil { + t.Fatal(err) + } + fileinfo, err := os.Stat(folderToCreate) + if err != nil { + t.Fatalf("Should have create a folder, got %v", err) + } + + if !fileinfo.IsDir() { + t.Fatalf("Should have been a dir, seems it's not") + } +} + +func TestCreateIfNotExistsFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + + fileToCreate := filepath.Join(tempFolder, "file/to/create") + + if err := CreateIfNotExists(fileToCreate, false); err != nil { + t.Fatal(err) + } + fileinfo, err := os.Stat(fileToCreate) + if err != nil { + t.Fatalf("Should have create a file, got %v", 
err) + } + + if fileinfo.IsDir() { + t.Fatalf("Should have been a file, seems it's not") + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_unix.go new file mode 100644 index 00000000..d5c3abf5 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package fileutils + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/Sirupsen/logrus" +) + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. +func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} diff --git a/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_windows.go new file mode 100644 index 00000000..5ec21cac --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_windows.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. Not supported +// on Windows. +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/src/github.com/docker/docker/pkg/httputils/httputils.go b/vendor/src/github.com/docker/docker/pkg/httputils/httputils.go new file mode 100644 index 00000000..d7dc4387 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/httputils/httputils.go @@ -0,0 +1,56 @@ +package httputils + +import ( + "errors" + "fmt" + "net/http" + "regexp" + "strings" + + "github.com/docker/docker/pkg/jsonmessage" +) + +var ( + headerRegexp = regexp.MustCompile(`^(?:(.+)/(.+?))\((.+)\).*$`) + errInvalidHeader = errors.New("Bad header, should be in format `docker/version (platform)`") +) + +// Download requests a given URL and returns an io.Reader. +func Download(url string) (resp *http.Response, err error) { + if resp, err = http.Get(url); err != nil { + return nil, err + } + if resp.StatusCode >= 400 { + return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) + } + return resp, nil +} + +// NewHTTPRequestError returns a JSON response error. +func NewHTTPRequestError(msg string, res *http.Response) error { + return &jsonmessage.JSONError{ + Message: msg, + Code: res.StatusCode, + } +} + +// ServerHeader contains the server information. +type ServerHeader struct { + App string // docker + Ver string // 1.8.0-dev + OS string // windows or linux +} + +// ParseServerHeader extracts pieces from an HTTP server header +// which is in the format "docker/version (os)" eg docker/1.8.0-dev (windows). 
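+//
+// For instance (illustrative):
+//
+//	hdr, err := ParseServerHeader("docker/1.8.0-dev (windows)")
+//	// err == nil; hdr.App == "docker", hdr.Ver == "1.8.0-dev", hdr.OS == "windows"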
+func ParseServerHeader(hdr string) (*ServerHeader, error) { + matches := headerRegexp.FindStringSubmatch(hdr) + if len(matches) != 4 { + return nil, errInvalidHeader + } + return &ServerHeader{ + App: strings.TrimSpace(matches[1]), + Ver: strings.TrimSpace(matches[2]), + OS: strings.TrimSpace(matches[3]), + }, nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/httputils/httputils_test.go b/vendor/src/github.com/docker/docker/pkg/httputils/httputils_test.go new file mode 100644 index 00000000..d35d0821 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/httputils/httputils_test.go @@ -0,0 +1,115 @@ +package httputils + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestDownload(t *testing.T) { + expected := "Hello, docker !" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, expected) + })) + defer ts.Close() + response, err := Download(ts.URL) + if err != nil { + t.Fatal(err) + } + + actual, err := ioutil.ReadAll(response.Body) + response.Body.Close() + + if err != nil || string(actual) != expected { + t.Fatalf("Expected the response %q, got err:%v, response:%v, actual:%s", expected, err, response, string(actual)) + } +} + +func TestDownload400Errors(t *testing.T) { + expectedError := "Got HTTP status code >= 400: 403 Forbidden" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // 403 + http.Error(w, "something failed (forbidden)", http.StatusForbidden) + })) + defer ts.Close() + // Expected status code = 403 + if _, err := Download(ts.URL); err == nil || err.Error() != expectedError { + t.Fatalf("Expected the the error %q, got %v", expectedError, err) + } +} + +func TestDownloadOtherErrors(t *testing.T) { + if _, err := Download("I'm not an url.."); err == nil || !strings.Contains(err.Error(), "unsupported protocol scheme") { + t.Fatalf("Expected an error with 'unsupported protocol scheme', got %v", err) + } +} + +func TestNewHTTPRequestError(t *testing.T) { + errorMessage := "Some error message" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // 403 + http.Error(w, errorMessage, http.StatusForbidden) + })) + defer ts.Close() + httpResponse, err := http.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + if err := NewHTTPRequestError(errorMessage, httpResponse); err.Error() != errorMessage { + t.Fatalf("Expected err to be %q, got %v", errorMessage, err) + } +} + +func TestParseServerHeader(t *testing.T) { + inputs := map[string][]string{ + "bad header": {"error"}, + "(bad header)": {"error"}, + "(without/spaces)": {"error"}, + "(header/with spaces)": {"error"}, + "foo/bar (baz)": {"foo", "bar", "baz"}, + "foo/bar": {"error"}, + "foo": {"error"}, + "foo/bar (baz space)": {"foo", "bar", "baz space"}, + " f f / b b ( b s ) ": {"f f", "b b", "b s"}, + "foo/bar (baz) ignore": {"foo", "bar", "baz"}, + "foo/bar ()": {"error"}, + "foo/bar()": {"error"}, + "foo/bar(baz)": {"foo", "bar", "baz"}, + "foo/bar/zzz(baz)": {"foo/bar", "zzz", "baz"}, + "foo/bar(baz/abc)": {"foo", "bar", "baz/abc"}, + "foo/bar(baz (abc))": {"foo", "bar", "baz (abc)"}, + } + + for header, values := range inputs { + serverHeader, err := ParseServerHeader(header) + if err != nil { + if err != errInvalidHeader { + t.Fatalf("Failed to parse %q, and got some unexpected error: %q", header, err) + } + if values[0] == "error" { + continue + } + t.Fatalf("Header %q failed to parse when it shouldn't have", header) + } + 
if values[0] == "error" { + t.Fatalf("Header %q parsed ok when it should have failed(%q).", header, serverHeader) + } + + if serverHeader.App != values[0] { + t.Fatalf("Expected serverHeader.App for %q to equal %q, got %q", header, values[0], serverHeader.App) + } + + if serverHeader.Ver != values[1] { + t.Fatalf("Expected serverHeader.Ver for %q to equal %q, got %q", header, values[1], serverHeader.Ver) + } + + if serverHeader.OS != values[2] { + t.Fatalf("Expected serverHeader.OS for %q to equal %q, got %q", header, values[2], serverHeader.OS) + } + + } + +} diff --git a/vendor/src/github.com/docker/docker/pkg/httputils/mimetype.go b/vendor/src/github.com/docker/docker/pkg/httputils/mimetype.go new file mode 100644 index 00000000..d5cf34e4 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/httputils/mimetype.go @@ -0,0 +1,30 @@ +package httputils + +import ( + "mime" + "net/http" +) + +// MimeTypes stores the MIME content type. +var MimeTypes = struct { + TextPlain string + Tar string + OctetStream string +}{"text/plain", "application/tar", "application/octet-stream"} + +// DetectContentType returns a best guess representation of the MIME +// content type for the bytes at c. The value detected by +// http.DetectContentType is guaranteed not be nil, defaulting to +// application/octet-stream when a better guess cannot be made. The +// result of this detection is then run through mime.ParseMediaType() +// which separates the actual MIME string from any parameters. +func DetectContentType(c []byte) (string, map[string]string, error) { + + ct := http.DetectContentType(c) + contentType, args, err := mime.ParseMediaType(ct) + if err != nil { + return "", nil, err + } + + return contentType, args, nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/httputils/mimetype_test.go b/vendor/src/github.com/docker/docker/pkg/httputils/mimetype_test.go new file mode 100644 index 00000000..9de433ee --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/httputils/mimetype_test.go @@ -0,0 +1,13 @@ +package httputils + +import ( + "testing" +) + +func TestDetectContentType(t *testing.T) { + input := []byte("That is just a plain text") + + if contentType, _, err := DetectContentType(input); err != nil || contentType != "text/plain" { + t.Errorf("TestDetectContentType failed") + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/httputils/resumablerequestreader.go b/vendor/src/github.com/docker/docker/pkg/httputils/resumablerequestreader.go new file mode 100644 index 00000000..bebc8608 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/httputils/resumablerequestreader.go @@ -0,0 +1,95 @@ +package httputils + +import ( + "fmt" + "io" + "net/http" + "time" + + "github.com/Sirupsen/logrus" +) + +type resumableRequestReader struct { + client *http.Client + request *http.Request + lastRange int64 + totalSize int64 + currentResponse *http.Response + failures uint32 + maxFailures uint32 +} + +// ResumableRequestReader makes it possible to resume reading a request's body transparently +// maxfail is the number of times we retry to make requests again (not resumes) +// totalsize is the total length of the body; auto detect if not provided +func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize} +} + +// ResumableRequestReaderWithInitialResponse makes it possible to resume +// reading the body of an already initiated request. 
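+//
+// Illustrative use, assuming res is the *http.Response obtained from an
+// earlier client.Do(req):
+//
+//	rc := ResumableRequestReaderWithInitialResponse(client, req, 5, res.ContentLength, res)
+//	defer rc.Close()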
+func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse} +} + +func (r *resumableRequestReader) Read(p []byte) (n int, err error) { + if r.client == nil || r.request == nil { + return 0, fmt.Errorf("client and request can't be nil\n") + } + isFreshRequest := false + if r.lastRange != 0 && r.currentResponse == nil { + readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) + r.request.Header.Set("Range", readRange) + time.Sleep(5 * time.Second) + } + if r.currentResponse == nil { + r.currentResponse, err = r.client.Do(r.request) + isFreshRequest = true + } + if err != nil && r.failures+1 != r.maxFailures { + r.cleanUpResponse() + r.failures++ + time.Sleep(5 * time.Duration(r.failures) * time.Second) + return 0, nil + } else if err != nil { + r.cleanUpResponse() + return 0, err + } + if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 { + r.cleanUpResponse() + return 0, io.EOF + } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest { + r.cleanUpResponse() + return 0, fmt.Errorf("the server doesn't support byte ranges") + } + if r.totalSize == 0 { + r.totalSize = r.currentResponse.ContentLength + } else if r.totalSize <= 0 { + r.cleanUpResponse() + return 0, fmt.Errorf("failed to auto detect content length") + } + n, err = r.currentResponse.Body.Read(p) + r.lastRange += int64(n) + if err != nil { + r.cleanUpResponse() + } + if err != nil && err != io.EOF { + logrus.Infof("encountered error during pull and clearing it before resume: %s", err) + err = nil + } + return n, err +} + +func (r *resumableRequestReader) Close() error { + r.cleanUpResponse() + r.client = nil + r.request = nil + return nil +} + +func (r *resumableRequestReader) cleanUpResponse() { + if r.currentResponse != nil { + r.currentResponse.Body.Close() + r.currentResponse = nil + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go b/vendor/src/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go new file mode 100644 index 00000000..e9d05783 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go @@ -0,0 +1,307 @@ +package httputils + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestResumableRequestHeaderSimpleErrors(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "Hello, world !") + })) + defer ts.Close() + + client := &http.Client{} + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + expectedError := "client and request can't be nil\n" + resreq := &resumableRequestReader{} + _, err = resreq.Read([]byte{}) + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) + } + + resreq = &resumableRequestReader{ + client: client, + request: req, + totalSize: -1, + } + expectedError = "failed to auto detect content length" + _, err = resreq.Read([]byte{}) + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) + } + +} + +// Not too much failures, bails out after some wait 
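+// (maxFailures is 2 in this test, so the single failed attempt is swallowed:
+// Read cleans up, sleeps, and returns 0, nil instead of an error)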
+func TestResumableRequestHeaderNotTooMuchFailures(t *testing.T) { + client := &http.Client{} + + var badReq *http.Request + badReq, err := http.NewRequest("GET", "I'm not an url", nil) + if err != nil { + t.Fatal(err) + } + + resreq := &resumableRequestReader{ + client: client, + request: badReq, + failures: 0, + maxFailures: 2, + } + read, err := resreq.Read([]byte{}) + if err != nil || read != 0 { + t.Fatalf("Expected no error and no byte read, got err:%v, read:%v.", err, read) + } +} + +// Too much failures, returns the error +func TestResumableRequestHeaderTooMuchFailures(t *testing.T) { + client := &http.Client{} + + var badReq *http.Request + badReq, err := http.NewRequest("GET", "I'm not an url", nil) + if err != nil { + t.Fatal(err) + } + + resreq := &resumableRequestReader{ + client: client, + request: badReq, + failures: 0, + maxFailures: 1, + } + defer resreq.Close() + + expectedError := `Get I%27m%20not%20an%20url: unsupported protocol scheme ""` + read, err := resreq.Read([]byte{}) + if err == nil || err.Error() != expectedError || read != 0 { + t.Fatalf("Expected the error '%s', got err:%v, read:%v.", expectedError, err, read) + } +} + +type errorReaderCloser struct{} + +func (errorReaderCloser) Close() error { return nil } + +func (errorReaderCloser) Read(p []byte) (n int, err error) { + return 0, fmt.Errorf("A error occured") +} + +// If a an unknown error is encountered, return 0, nil and log it +func TestResumableRequestReaderWithReadError(t *testing.T) { + var req *http.Request + req, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + + response := &http.Response{ + Status: "500 Internal Server", + StatusCode: 500, + ContentLength: 0, + Close: true, + Body: errorReaderCloser{}, + } + + resreq := &resumableRequestReader{ + client: client, + request: req, + currentResponse: response, + lastRange: 1, + totalSize: 1, + } + defer resreq.Close() + + buf := make([]byte, 1) + read, err := resreq.Read(buf) + if err != nil { + t.Fatal(err) + } + + if read != 0 { + t.Fatalf("Expected to have read nothing, but read %v", read) + } +} + +func TestResumableRequestReaderWithEOFWith416Response(t *testing.T) { + var req *http.Request + req, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + + response := &http.Response{ + Status: "416 Requested Range Not Satisfiable", + StatusCode: 416, + ContentLength: 0, + Close: true, + Body: ioutil.NopCloser(strings.NewReader("")), + } + + resreq := &resumableRequestReader{ + client: client, + request: req, + currentResponse: response, + lastRange: 1, + totalSize: 1, + } + defer resreq.Close() + + buf := make([]byte, 1) + _, err = resreq.Read(buf) + if err == nil || err != io.EOF { + t.Fatalf("Expected an io.EOF error, got %v", err) + } +} + +func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("Range") == "" { + t.Fatalf("Expected a Range HTTP header, got nothing") + } + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + + resreq := &resumableRequestReader{ + client: client, + request: req, + lastRange: 1, + } + defer resreq.Close() + + buf := make([]byte, 2) + _, err = resreq.Read(buf) + if err == nil || err.Error() != "the server doesn't support byte ranges" { + t.Fatalf("Expected an error 
'the server doesn't support byte ranges', got %v", err) + } +} + +func TestResumableRequestReaderWithZeroTotalSize(t *testing.T) { + + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + retries := uint32(5) + + resreq := ResumableRequestReader(client, req, retries, 0) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + if err != nil { + t.Fatal(err) + } + + resstr := strings.TrimSuffix(string(data), "\n") + + if resstr != srvtxt { + t.Errorf("resstr != srvtxt") + } +} + +func TestResumableRequestReader(t *testing.T) { + + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + retries := uint32(5) + imgSize := int64(len(srvtxt)) + + resreq := ResumableRequestReader(client, req, retries, imgSize) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + if err != nil { + t.Fatal(err) + } + + resstr := strings.TrimSuffix(string(data), "\n") + + if resstr != srvtxt { + t.Errorf("resstr != srvtxt") + } +} + +func TestResumableRequestReaderWithInitialResponse(t *testing.T) { + + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + retries := uint32(5) + imgSize := int64(len(srvtxt)) + + res, err := client.Do(req) + if err != nil { + t.Fatal(err) + } + + resreq := ResumableRequestReaderWithInitialResponse(client, req, retries, imgSize, res) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + if err != nil { + t.Fatal(err) + } + + resstr := strings.TrimSuffix(string(data), "\n") + + if resstr != srvtxt { + t.Errorf("resstr != srvtxt") + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/nat/nat.go b/vendor/src/github.com/docker/docker/pkg/nat/nat.go new file mode 100644 index 00000000..6595feb0 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/nat/nat.go @@ -0,0 +1,227 @@ +package nat + +// nat is a convenience package for docker's manipulation of strings describing +// network ports. 
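+//
+// As an illustration, a spec like "127.0.0.1:8080:80/tcp" binds host port
+// 8080 on 127.0.0.1 to container port 80 over TCP; ParsePortSpecs below
+// turns such specs into exposed-port and port-binding maps.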
+ +import ( + "fmt" + "net" + "strconv" + "strings" + + "github.com/docker/docker/pkg/parsers" +) + +const ( + // portSpecTemplate is the expected format for port specifications + portSpecTemplate = "ip:hostPort:containerPort" +) + +// PortBinding represents a binding between a Host IP address and a Host Port +type PortBinding struct { + // HostIP is the host IP Address + HostIP string `json:"HostIp"` + // HostPort is the host port number + HostPort string +} + +// PortMap is a collection of PortBinding indexed by Port +type PortMap map[Port][]PortBinding + +// PortSet is a collection of structs indexed by Port +type PortSet map[Port]struct{} + +// Port is a string containing port number and protocol in the format "80/tcp" +type Port string + +// NewPort creates a new instance of a Port given a protocol and port number or port range +func NewPort(proto, port string) (Port, error) { + // Check for parsing issues on "port" now so we can avoid having + // to check it later on. + + portStartInt, portEndInt, err := ParsePortRange(port) + if err != nil { + return "", err + } + + if portStartInt == portEndInt { + return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil + } + return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil +} + +// ParsePort parses the port number string and returns an int +func ParsePort(rawPort string) (int, error) { + if len(rawPort) == 0 { + return 0, nil + } + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +// ParsePortRange parses the port range string and returns start/end ints +func ParsePortRange(rawPort string) (int, int, error) { + if len(rawPort) == 0 { + return 0, 0, nil + } + start, end, err := parsers.ParsePortRange(rawPort) + if err != nil { + return 0, 0, err + } + return int(start), int(end), nil +} + +// Proto returns the protocol of a Port +func (p Port) Proto() string { + proto, _ := SplitProtoPort(string(p)) + return proto +} + +// Port returns the port number of a Port +func (p Port) Port() string { + _, port := SplitProtoPort(string(p)) + return port +} + +// Int returns the port number of a Port as an int +func (p Port) Int() int { + portStr := p.Port() + if len(portStr) == 0 { + return 0 + } + + // We don't need to check for an error because we're going to + // assume that any error would have been found, and reported, in NewPort() + port, _ := strconv.ParseUint(portStr, 10, 16) + return int(port) +} + +// Range returns the start/end port numbers of a Port range as ints +func (p Port) Range() (int, int, error) { + return ParsePortRange(p.Port()) +} + +// SplitProtoPort splits a port in the format of proto/port +func SplitProtoPort(rawPort string) (string, string) { + parts := strings.Split(rawPort, "/") + l := len(parts) + if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { + return "", "" + } + if l == 1 { + return "tcp", rawPort + } + if len(parts[1]) == 0 { + return "tcp", parts[0] + } + return parts[1], parts[0] +} + +func validateProto(proto string) bool { + for _, availableProto := range []string{"tcp", "udp"} { + if availableProto == proto { + return true + } + } + return false +} + +// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses +// these in to the internal types +func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { + var ( + exposedPorts = make(map[Port]struct{}, len(ports)) + bindings = make(map[Port][]PortBinding) + ) + + for _, rawPort := range ports { + proto := 
"tcp" + + if i := strings.LastIndex(rawPort, "/"); i != -1 { + proto = rawPort[i+1:] + rawPort = rawPort[:i] + } + if !strings.Contains(rawPort, ":") { + rawPort = fmt.Sprintf("::%s", rawPort) + } else if len(strings.Split(rawPort, ":")) == 2 { + rawPort = fmt.Sprintf(":%s", rawPort) + } + + parts, err := parsers.PartParser(portSpecTemplate, rawPort) + if err != nil { + return nil, nil, err + } + + var ( + containerPort = parts["containerPort"] + rawIP = parts["ip"] + hostPort = parts["hostPort"] + ) + + if rawIP != "" && net.ParseIP(rawIP) == nil { + return nil, nil, fmt.Errorf("Invalid ip address: %s", rawIP) + } + if containerPort == "" { + return nil, nil, fmt.Errorf("No port specified: %s", rawPort) + } + + startPort, endPort, err := parsers.ParsePortRange(containerPort) + if err != nil { + return nil, nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + } + + var startHostPort, endHostPort uint64 = 0, 0 + if len(hostPort) > 0 { + startHostPort, endHostPort, err = parsers.ParsePortRange(hostPort) + if err != nil { + return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort) + } + } + + if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { + // Allow host port range iff containerPort is not a range. + // In this case, use the host port range as the dynamic + // host port range to allocate into. + if endPort != startPort { + return nil, nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) + } + } + + if !validateProto(strings.ToLower(proto)) { + return nil, nil, fmt.Errorf("Invalid proto: %s", proto) + } + + for i := uint64(0); i <= (endPort - startPort); i++ { + containerPort = strconv.FormatUint(startPort+i, 10) + if len(hostPort) > 0 { + hostPort = strconv.FormatUint(startHostPort+i, 10) + } + // Set hostPort to a range only if there is a single container port + // and a dynamic host port. + if startPort == endPort && startHostPort != endHostPort { + hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) + } + port, err := NewPort(strings.ToLower(proto), containerPort) + if err != nil { + return nil, nil, err + } + if _, exists := exposedPorts[port]; !exists { + exposedPorts[port] = struct{}{} + } + + binding := PortBinding{ + HostIP: rawIP, + HostPort: hostPort, + } + bslice, exists := bindings[port] + if !exists { + bslice = []PortBinding{} + } + bindings[port] = append(bslice, binding) + } + } + return exposedPorts, bindings, nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/nat/nat_test.go b/vendor/src/github.com/docker/docker/pkg/nat/nat_test.go new file mode 100644 index 00000000..2c71142b --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/nat/nat_test.go @@ -0,0 +1,525 @@ +package nat + +import ( + "testing" +) + +func TestParsePort(t *testing.T) { + var ( + p int + err error + ) + + p, err = ParsePort("1234") + + if err != nil || p != 1234 { + t.Fatal("Parsing '1234' did not succeed") + } + + // FIXME currently this is a valid port. I don't think it should be. + // I'm leaving this test commented out until we make a decision. 
+ // - erikh + + /* + p, err = ParsePort("0123") + + if err != nil { + t.Fatal("Successfully parsed port '0123' to '123'") + } + */ + + p, err = ParsePort("asdf") + + if err == nil || p != 0 { + t.Fatal("Parsing port 'asdf' succeeded") + } + + p, err = ParsePort("1asdf") + + if err == nil || p != 0 { + t.Fatal("Parsing port '1asdf' succeeded") + } +} + +func TestParsePortRange(t *testing.T) { + var ( + begin int + end int + err error + ) + + type TestRange struct { + Range string + Begin int + End int + } + validRanges := []TestRange{ + {"1234", 1234, 1234}, + {"1234-1234", 1234, 1234}, + {"1234-1235", 1234, 1235}, + {"8000-9000", 8000, 9000}, + {"0", 0, 0}, + {"0-0", 0, 0}, + } + + for _, r := range validRanges { + begin, end, err = ParsePortRange(r.Range) + + if err != nil || begin != r.Begin { + t.Fatalf("Parsing port range '%s' did not succeed. Expected begin %d, got %d", r.Range, r.Begin, begin) + } + if err != nil || end != r.End { + t.Fatalf("Parsing port range '%s' did not succeed. Expected end %d, got %d", r.Range, r.End, end) + } + } + + invalidRanges := []string{ + "asdf", + "1asdf", + "9000-8000", + "9000-", + "-8000", + "-8000-", + } + + for _, r := range invalidRanges { + begin, end, err = ParsePortRange(r) + + if err == nil || begin != 0 || end != 0 { + t.Fatalf("Parsing port range '%s' succeeded", r) + } + } +} + +func TestPort(t *testing.T) { + p, err := NewPort("tcp", "1234") + + if err != nil { + t.Fatalf("tcp, 1234 had a parsing issue: %v", err) + } + + if string(p) != "1234/tcp" { + t.Fatal("tcp, 1234 did not result in the string 1234/tcp") + } + + if p.Proto() != "tcp" { + t.Fatal("protocol was not tcp") + } + + if p.Port() != "1234" { + t.Fatal("port string value was not 1234") + } + + if p.Int() != 1234 { + t.Fatal("port int value was not 1234") + } + + p, err = NewPort("tcp", "asd1234") + if err == nil { + t.Fatal("tcp, asd1234 was supposed to fail") + } + + p, err = NewPort("tcp", "1234-1230") + if err == nil { + t.Fatal("tcp, 1234-1230 was supposed to fail") + } + + p, err = NewPort("tcp", "1234-1242") + if err != nil { + t.Fatalf("tcp, 1234-1242 had a parsing issue: %v", err) + } + + if string(p) != "1234-1242/tcp" { + t.Fatal("tcp, 1234-1242 did not result in the string 1234-1242/tcp") + } +} + +func TestSplitProtoPort(t *testing.T) { + var ( + proto string + port string + ) + + proto, port = SplitProtoPort("1234/tcp") + + if proto != "tcp" || port != "1234" { + t.Fatal("Could not split 1234/tcp properly") + } + + proto, port = SplitProtoPort("") + + if proto != "" || port != "" { + t.Fatal("parsing an empty string yielded surprising results", proto, port) + } + + proto, port = SplitProtoPort("1234") + + if proto != "tcp" || port != "1234" { + t.Fatal("tcp is not the default protocol for portspec '1234'", proto, port) + } + + proto, port = SplitProtoPort("1234/") + + if proto != "tcp" || port != "1234" { + t.Fatal("parsing '1234/' yielded:" + port + "/" + proto) + } + + proto, port = SplitProtoPort("/tcp") + + if proto != "" || port != "" { + t.Fatal("parsing '/tcp' yielded:" + port + "/" + proto) + } +} + +func TestParsePortSpecs(t *testing.T) { + var ( + portMap map[Port]struct{} + bindingMap map[Port][]PortBinding + err error + ) + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234/tcp", "2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp 
was not parsed properly") + } + + for portspec, bindings := range bindingMap { + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) + } + + if bindings[0].HostPort != "" { + t.Fatalf("HostPort should not be set for %s", portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234:1234/tcp", "2345:2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234:1234/tcp", "0.0.0.0:2345:2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "0.0.0.0" { + t.Fatalf("HostIP is not 0.0.0.0 for %s", portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + _, _, err = ParsePortSpecs([]string{"localhost:1234:1234/tcp"}) + + if err == nil { + t.Fatal("Received no error while trying to parse a hostname instead of ip") + } +} + +func TestParsePortSpecsWithRange(t *testing.T) { + var ( + portMap map[Port]struct{} + bindingMap map[Port][]PortBinding + err error + ) + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234-1236/tcp", "2345-2347/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1235/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2346/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) + } + + if bindings[0].HostPort != "" { + t.Fatalf("HostPort should not be set for %s", portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234-1236:1234-1236/tcp", "2345-2347:2345-2347/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1235/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2346/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "" { + t.Fatalf("HostIP 
should not be set for %s", portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234-1236:1234-1236/tcp", "0.0.0.0:2345-2347:2345-2347/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1235/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2346/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + if len(bindings) != 1 || bindings[0].HostIP != "0.0.0.0" || bindings[0].HostPort != port { + t.Fatalf("Expect single binding to port %s but found %s", port, bindings) + } + } + + _, _, err = ParsePortSpecs([]string{"localhost:1234-1236:1234-1236/tcp"}) + + if err == nil { + t.Fatal("Received no error while trying to parse a hostname instead of ip") + } +} + +func TestParseNetworkOptsPrivateOnly(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100::80"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "tcp" { + t.Logf("Expected tcp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "80" { + t.Logf("Expected 80 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "" { + t.Logf("Expected \"\" got %s", s.HostPort) + t.Fail() + } + if s.HostIP != "192.168.1.100" { + t.Fail() + } + } +} + +func TestParseNetworkOptsPublic(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100:8080:80"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "tcp" { + t.Logf("Expected tcp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "80" { + t.Logf("Expected 80 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "8080" { + t.Logf("Expected 8080 got %s", s.HostPort) + t.Fail() + } + if s.HostIP != "192.168.1.100" { + t.Fail() + } + } +} + +func TestParseNetworkOptsPublicNoPort(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100"}) + + if err == nil { + t.Logf("Expected error Invalid containerPort") + t.Fail() + } + if ports != nil { + t.Logf("Expected nil got %s", ports) + t.Fail() + } + if bindings != nil { + t.Logf("Expected nil got %s", bindings) + t.Fail() + } +} + +func TestParseNetworkOptsNegativePorts(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100:-1:-1"}) + + if err == nil { + t.Fail() + } + if len(ports) != 0 { + t.Logf("Expected nil got %d", len(ports)) + t.Fail() + } + if len(bindings) != 0 { + t.Logf("Expected 0 got %d", len(bindings)) + t.Fail() + } +} + +func TestParseNetworkOptsUdp(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100::6000/udp"}) + if err != nil 
{ + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "udp" { + t.Logf("Expected udp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "6000" { + t.Logf("Expected 6000 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "" { + t.Logf("Expected \"\" got %s", s.HostPort) + t.Fail() + } + if s.HostIP != "192.168.1.100" { + t.Fail() + } + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/nat/sort.go b/vendor/src/github.com/docker/docker/pkg/nat/sort.go new file mode 100644 index 00000000..1eb0fedd --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/nat/sort.go @@ -0,0 +1,98 @@ +package nat + +import ( + "sort" + "strings" + + "github.com/docker/docker/pkg/parsers" +) + +type portSorter struct { + ports []Port + by func(i, j Port) bool +} + +func (s *portSorter) Len() int { + return len(s.ports) +} + +func (s *portSorter) Swap(i, j int) { + s.ports[i], s.ports[j] = s.ports[j], s.ports[i] +} + +func (s *portSorter) Less(i, j int) bool { + ip := s.ports[i] + jp := s.ports[j] + + return s.by(ip, jp) +} + +// Sort sorts a list of ports using the provided predicate +// This function should compare `i` and `j`, returning true if `i` is +// considered to be less than `j` +func Sort(ports []Port, predicate func(i, j Port) bool) { + s := &portSorter{ports, predicate} + sort.Sort(s) +} + +type portMapEntry struct { + port Port + binding PortBinding +} + +type portMapSorter []portMapEntry + +func (s portMapSorter) Len() int { return len(s) } +func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// sort the port so that the order is: +// 1. port with larger specified bindings +// 2. larger port +// 3. port with tcp protocol +func (s portMapSorter) Less(i, j int) bool { + pi, pj := s[i].port, s[j].port + hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) + return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") +} + +// SortPortMap sorts the list of ports and their respected mapping. The ports +// will explicit HostPort will be placed first. 
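+//
+// Editor's illustration (not part of the vendored file), mirroring the
+// package's own test: ports that carry an explicit HostPort binding are
+// moved to the front.
+//
+//	ports := []Port{"8000/tcp", "9999/tcp"}
+//	bindings := PortMap{
+//		"9999/tcp": {{HostIP: "0.0.0.0", HostPort: "40000"}},
+//	}
+//	SortPortMap(ports, bindings)
+//	// ports is now []Port{"9999/tcp", "8000/tcp"}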
+func SortPortMap(ports []Port, bindings PortMap) { + s := portMapSorter{} + for _, p := range ports { + if binding, ok := bindings[p]; ok { + for _, b := range binding { + s = append(s, portMapEntry{port: p, binding: b}) + } + bindings[p] = []PortBinding{} + } else { + s = append(s, portMapEntry{port: p}) + } + } + + sort.Sort(s) + var ( + i int + pm = make(map[Port]struct{}) + ) + // reorder ports + for _, entry := range s { + if _, ok := pm[entry.port]; !ok { + ports[i] = entry.port + pm[entry.port] = struct{}{} + i++ + } + // reorder bindings for this port + if _, ok := bindings[entry.port]; ok { + bindings[entry.port] = append(bindings[entry.port], entry.binding) + } + } +} + +func toInt(s string) uint64 { + i, _, err := parsers.ParsePortRange(s) + if err != nil { + i = 0 + } + return i +} diff --git a/vendor/src/github.com/docker/docker/pkg/nat/sort_test.go b/vendor/src/github.com/docker/docker/pkg/nat/sort_test.go new file mode 100644 index 00000000..88ed9111 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/nat/sort_test.go @@ -0,0 +1,85 @@ +package nat + +import ( + "fmt" + "reflect" + "testing" +) + +func TestSortUniquePorts(t *testing.T) { + ports := []Port{ + Port("6379/tcp"), + Port("22/tcp"), + } + + Sort(ports, func(ip, jp Port) bool { + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") + }) + + first := ports[0] + if fmt.Sprint(first) != "22/tcp" { + t.Log(fmt.Sprint(first)) + t.Fail() + } +} + +func TestSortSamePortWithDifferentProto(t *testing.T) { + ports := []Port{ + Port("8888/tcp"), + Port("8888/udp"), + Port("6379/tcp"), + Port("6379/udp"), + } + + Sort(ports, func(ip, jp Port) bool { + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") + }) + + first := ports[0] + if fmt.Sprint(first) != "6379/tcp" { + t.Fail() + } +} + +func TestSortPortMap(t *testing.T) { + ports := []Port{ + Port("22/tcp"), + Port("22/udp"), + Port("8000/tcp"), + Port("6379/tcp"), + Port("9999/tcp"), + } + + portMap := PortMap{ + Port("22/tcp"): []PortBinding{ + {}, + }, + Port("8000/tcp"): []PortBinding{ + {}, + }, + Port("6379/tcp"): []PortBinding{ + {}, + {HostIP: "0.0.0.0", HostPort: "32749"}, + }, + Port("9999/tcp"): []PortBinding{ + {HostIP: "0.0.0.0", HostPort: "40000"}, + }, + } + + SortPortMap(ports, portMap) + if !reflect.DeepEqual(ports, []Port{ + Port("9999/tcp"), + Port("6379/tcp"), + Port("8000/tcp"), + Port("22/tcp"), + Port("22/udp"), + }) { + t.Errorf("failed to prioritize port with explicit mappings, got %v", ports) + } + if pm := portMap[Port("6379/tcp")]; !reflect.DeepEqual(pm, []PortBinding{ + {HostIP: "0.0.0.0", HostPort: "32749"}, + {}, + }) { + t.Errorf("failed to prioritize bindings with explicit mappings, got %v", pm) + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/filters/parse.go b/vendor/src/github.com/docker/docker/pkg/parsers/filters/parse.go new file mode 100644 index 00000000..6c394f16 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/filters/parse.go @@ -0,0 +1,134 @@ +// Package filters provides helper function to parse and handle command line +// filter, used for example in docker ps or docker images commands. +package filters + +import ( + "encoding/json" + "errors" + "regexp" + "strings" +) + +// Args stores filter arguments as map key:{array of values}. +// It contains a aggregation of the list of arguments (which are in the form +// of -f 'key=value') based on the key, and store values for the same key +// in an slice. 
+// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' +// the args will be {'label': {'label1=1','label2=2'}, 'image.name', {'ubuntu'}} +type Args map[string][]string + +// ParseFlag parses the argument to the filter flag. Like +// +// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` +// +// If prev map is provided, then it is appended to, and returned. By default a new +// map is created. +func ParseFlag(arg string, prev Args) (Args, error) { + filters := prev + if prev == nil { + filters = Args{} + } + if len(arg) == 0 { + return filters, nil + } + + if !strings.Contains(arg, "=") { + return filters, ErrBadFormat + } + + f := strings.SplitN(arg, "=", 2) + name := strings.ToLower(strings.TrimSpace(f[0])) + value := strings.TrimSpace(f[1]) + filters[name] = append(filters[name], value) + + return filters, nil +} + +// ErrBadFormat is an error returned in case of bad format for a filter. +var ErrBadFormat = errors.New("bad format of filter (expected name=value)") + +// ToParam packs the Args into an string for easy transport from client to server. +func ToParam(a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if len(a) == 0 { + return "", nil + } + + buf, err := json.Marshal(a) + if err != nil { + return "", err + } + return string(buf), nil +} + +// FromParam unpacks the filter Args. +func FromParam(p string) (Args, error) { + args := Args{} + if len(p) == 0 { + return args, nil + } + if err := json.NewDecoder(strings.NewReader(p)).Decode(&args); err != nil { + return nil, err + } + return args, nil +} + +// MatchKVList returns true if the values for the specified field maches the ones +// from the sources. +// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, +// field is 'label' and sources are {'label':{'label1=1','label2=2','label3=3'}} +// it returns true. +func (filters Args) MatchKVList(field string, sources map[string]string) bool { + fieldValues := filters[field] + + //do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + + if sources == nil || len(sources) == 0 { + return false + } + +outer: + for _, name2match := range fieldValues { + testKV := strings.SplitN(name2match, "=", 2) + + for k, v := range sources { + if len(testKV) == 1 { + if k == testKV[0] { + continue outer + } + } else if k == testKV[0] && v == testKV[1] { + continue outer + } + } + + return false + } + + return true +} + +// Match returns true if the values for the specified field matches the source string +// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, +// field is 'image.name' and source is 'ubuntu' +// it returns true. 
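+//
+// Editor's illustration (not part of the vendored file): values are matched
+// as regular expressions, and an unset field never filters anything out.
+//
+//	args, _ := ParseFlag("image.name=ubu.*", nil)
+//	args.Match("image.name", "ubuntu") // true: regexp match
+//	args.Match("image.name", "debian") // false
+//	args.Match("created", "anything")  // true: no filter set for "created"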
+func (filters Args) Match(field, source string) bool { + fieldValues := filters[field] + + //do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + for _, name2match := range fieldValues { + match, err := regexp.MatchString(name2match, source) + if err != nil { + continue + } + if match { + return true + } + } + return false +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/filters/parse_test.go b/vendor/src/github.com/docker/docker/pkg/parsers/filters/parse_test.go new file mode 100644 index 00000000..eb9fcef9 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/filters/parse_test.go @@ -0,0 +1,218 @@ +package filters + +import ( + "sort" + "testing" +) + +func TestParseArgs(t *testing.T) { + // equivalent of `docker ps -f 'created=today' -f 'image.name=ubuntu*' -f 'image.name=*untu'` + flagArgs := []string{ + "created=today", + "image.name=ubuntu*", + "image.name=*untu", + } + var ( + args = Args{} + err error + ) + for i := range flagArgs { + args, err = ParseFlag(flagArgs[i], args) + if err != nil { + t.Errorf("failed to parse %s: %s", flagArgs[i], err) + } + } + if len(args["created"]) != 1 { + t.Errorf("failed to set this arg") + } + if len(args["image.name"]) != 2 { + t.Errorf("the args should have collapsed") + } +} + +func TestParseArgsEdgeCase(t *testing.T) { + var filters Args + args, err := ParseFlag("", filters) + if err != nil { + t.Fatal(err) + } + if args == nil || len(args) != 0 { + t.Fatalf("Expected an empty Args (map), got %v", args) + } + if args, err = ParseFlag("anything", args); err == nil || err != ErrBadFormat { + t.Fatalf("Expected ErrBadFormat, got %v", err) + } +} + +func TestToParam(t *testing.T) { + a := Args{ + "created": []string{"today"}, + "image.name": []string{"ubuntu*", "*untu"}, + } + + _, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } +} + +func TestFromParam(t *testing.T) { + invalids := []string{ + "anything", + "['a','list']", + "{'key': 'value'}", + `{"key": "value"}`, + } + valids := map[string]Args{ + `{"key": ["value"]}`: { + "key": {"value"}, + }, + `{"key": ["value1", "value2"]}`: { + "key": {"value1", "value2"}, + }, + `{"key1": ["value1"], "key2": ["value2"]}`: { + "key1": {"value1"}, + "key2": {"value2"}, + }, + } + for _, invalid := range invalids { + if _, err := FromParam(invalid); err == nil { + t.Fatalf("Expected an error with %v, got nothing", invalid) + } + } + for json, expectedArgs := range valids { + args, err := FromParam(json) + if err != nil { + t.Fatal(err) + } + if len(args) != len(expectedArgs) { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + for key, expectedValues := range expectedArgs { + values := args[key] + sort.Strings(values) + sort.Strings(expectedValues) + if len(values) != len(expectedValues) { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + for index, expectedValue := range expectedValues { + if values[index] != expectedValue { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + } + } + } +} + +func TestEmpty(t *testing.T) { + a := Args{} + v, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } + v1, err := FromParam(v) + if err != nil { + t.Errorf("%s", err) + } + if len(a) != len(v1) { + t.Errorf("these should both be empty sets") + } +} + +func TestArgsMatchKVList(t *testing.T) { + // empty sources + args := Args{ + "created": []string{"today"}, + } + if args.MatchKVList("created", map[string]string{}) { 
+ t.Fatalf("Expected false for (%v,created), got true", args) + } + // Not empty sources + sources := map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "value3", + } + matches := map[*Args]string{ + &Args{}: "field", + &Args{ + "created": []string{"today"}, + "labels": []string{"key1"}, + }: "labels", + &Args{ + "created": []string{"today"}, + "labels": []string{"key1=value1"}, + }: "labels", + } + differs := map[*Args]string{ + &Args{ + "created": []string{"today"}, + }: "created", + &Args{ + "created": []string{"today"}, + "labels": []string{"key4"}, + }: "labels", + &Args{ + "created": []string{"today"}, + "labels": []string{"key1=value3"}, + }: "labels", + } + for args, field := range matches { + if args.MatchKVList(field, sources) != true { + t.Fatalf("Expected true for %v on %v, got false", sources, args) + } + } + for args, field := range differs { + if args.MatchKVList(field, sources) != false { + t.Fatalf("Expected false for %v on %v, got true", sources, args) + } + } +} + +func TestArgsMatch(t *testing.T) { + source := "today" + matches := map[*Args]string{ + &Args{}: "field", + &Args{ + "created": []string{"today"}, + "labels": []string{"key1"}, + }: "today", + &Args{ + "created": []string{"to*"}, + }: "created", + &Args{ + "created": []string{"to(.*)"}, + }: "created", + &Args{ + "created": []string{"tod"}, + }: "created", + &Args{ + "created": []string{"anything", "to*"}, + }: "created", + } + differs := map[*Args]string{ + &Args{ + "created": []string{"tomorrow"}, + }: "created", + &Args{ + "created": []string{"to(day"}, + }: "created", + &Args{ + "created": []string{"tom(.*)"}, + }: "created", + &Args{ + "created": []string{"today1"}, + "labels": []string{"today"}, + }: "created", + } + for args, field := range matches { + if args.Match(field, source) != true { + t.Fatalf("Expected true for %v on %v, got false", source, args) + } + } + for args, field := range differs { + if args.Match(field, source) != false { + t.Fatalf("Expected false for %v on %v, got true", source, args) + } + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go b/vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go new file mode 100644 index 00000000..a21ba137 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go @@ -0,0 +1,100 @@ +// +build !windows + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "bytes" + "errors" + "fmt" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4) + Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1) + Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2) + Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) +} + +// CompareKernelVersion compares two kernel.VersionInfo structs. +// Returns -1 if a < b, 0 if a == b, 1 it a > b +func CompareKernelVersion(a, b VersionInfo) int { + if a.Kernel < b.Kernel { + return -1 + } else if a.Kernel > b.Kernel { + return 1 + } + + if a.Major < b.Major { + return -1 + } else if a.Major > b.Major { + return 1 + } + + if a.Minor < b.Minor { + return -1 + } else if a.Minor > b.Minor { + return 1 + } + + return 0 +} + +// GetKernelVersion gets the current kernel version. 
+func GetKernelVersion() (*VersionInfo, error) { + var ( + err error + ) + + uts, err := uname() + if err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + // Remove the \x00 from the release for Atoi to parse correctly + release = release[:bytes.IndexByte(release, 0)] + + return ParseRelease(string(release)) +} + +// ParseRelease parses a string and creates a VersionInfo based on it. +func ParseRelease(release string) (*VersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers. + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &VersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go b/vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go new file mode 100644 index 00000000..6a2c2468 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go @@ -0,0 +1,92 @@ +package kernel + +import ( + "fmt" + "testing" +) + +func assertParseRelease(t *testing.T, release string, b *VersionInfo, result int) { + var ( + a *VersionInfo + ) + a, _ = ParseRelease(release) + + if r := CompareKernelVersion(*a, *b); r != result { + t.Fatalf("Unexpected kernel version comparison result for (%v,%v). Found %d, expected %d", release, b, r, result) + } + if a.Flavor != b.Flavor { + t.Fatalf("Unexpected parsed kernel flavor. Found %s, expected %s", a.Flavor, b.Flavor) + } +} + +func TestParseRelease(t *testing.T) { + assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.8.0-19-generic", &VersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0) + assertParseRelease(t, "3.12.8tag", &VersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0) + assertParseRelease(t, "3.12-1-amd64", &VersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0) + assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 4, Major: 8, Minor: 0}, -1) + // Errors + invalids := []string{ + "3", + "a", + "a.a", + "a.a.a-a", + } + for _, invalid := range invalids { + expectedMessage := fmt.Sprintf("Can't parse kernel version %v", invalid) + if _, err := ParseRelease(invalid); err == nil || err.Error() != expectedMessage { + + } + } +} + +func assertKernelVersion(t *testing.T, a, b VersionInfo, result int) { + if r := CompareKernelVersion(a, b); r != result { + t.Fatalf("Unexpected kernel version comparison result. 
Found %d, expected %d", r, result) + } +} + +func TestCompareKernelVersion(t *testing.T) { + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + VersionInfo{Kernel: 2, Major: 6, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 2, Major: 6, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 5}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 0, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 7, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, + VersionInfo{Kernel: 3, Major: 7, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, + -1) +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go new file mode 100644 index 00000000..85ca250c --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go @@ -0,0 +1,67 @@ +package kernel + +import ( + "fmt" + "syscall" + "unsafe" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6) + major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1) + minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601) + build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) +} + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + + var ( + h syscall.Handle + dwVersion uint32 + err error + ) + + KVI := &VersionInfo{"Unknown", 0, 0, 0} + + if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, + syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + syscall.KEY_READ, + &h); err != nil { + return KVI, err + } + defer syscall.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err = syscall.RegQueryValueEx(h, + syscall.StringToUTF16Ptr("BuildLabEx"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return KVI, err + } + + KVI.kvi = syscall.UTF16ToString(buf[:]) + + // Important - docker.exe MUST be manifested for this API to return + // the correct information. 
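+ // (Editor's note, not in the vendored file) GetVersion packs the version
+ // number into a DWORD: low byte = major, second byte = minor, and the
+ // high word = build number, which the bit masks below unpack.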
+ if dwVersion, err = syscall.GetVersion(); err != nil { + return KVI, err + } + + KVI.major = int(dwVersion & 0xFF) + KVI.minor = int((dwVersion & 0XFF00) >> 8) + KVI.build = int((dwVersion & 0xFFFF0000) >> 16) + + return KVI, nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go b/vendor/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go new file mode 100644 index 00000000..7d12fcbd --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go @@ -0,0 +1,19 @@ +package kernel + +import ( + "syscall" +) + +// Utsname represents the system name structure. +// It is passthgrouh for syscall.Utsname in order to make it portable with +// other platforms where it is not available. +type Utsname syscall.Utsname + +func uname() (*syscall.Utsname, error) { + uts := &syscall.Utsname{} + + if err := syscall.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go b/vendor/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go new file mode 100644 index 00000000..79c66b32 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux + +package kernel + +import ( + "errors" +) + +// Utsname represents the system name structure. +// It is defined here to make it portable as it is available on linux but not +// on windows. +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is available only on linux") +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go b/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go new file mode 100644 index 00000000..0589cf2a --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go @@ -0,0 +1,18 @@ +package operatingsystem + +import ( + "errors" +) + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + // TODO: Implement OS detection + return "", errors.New("Cannot detect OS version") +} + +// IsContainerized returns true if we are running inside a container. +// No-op on FreeBSD, always returns false. +func IsContainerized() (bool, error) { + // TODO: Implement jail detection + return false, errors.New("Cannot detect if we are in container") +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go b/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go new file mode 100644 index 00000000..ca8ea8f0 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go @@ -0,0 +1,44 @@ +// Package operatingsystem provides helper function to get the operating system +// name for different platforms. +package operatingsystem + +import ( + "bytes" + "errors" + "io/ioutil" +) + +var ( + // file to use to detect if the daemon is running in a container + proc1Cgroup = "/proc/1/cgroup" + + // file to check to determine Operating System + etcOsRelease = "/etc/os-release" +) + +// GetOperatingSystem gets the name of the current operating system. 
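+//
+// Editor's illustration (not part of the vendored file): given an
+// /etc/os-release containing
+//
+//	PRETTY_NAME="Ubuntu 14.04 LTS"
+//
+// this returns "Ubuntu 14.04 LTS"; if no PRETTY_NAME entry exists, an error
+// is returned instead.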
+func GetOperatingSystem() (string, error) { + b, err := ioutil.ReadFile(etcOsRelease) + if err != nil { + return "", err + } + if i := bytes.Index(b, []byte("PRETTY_NAME")); i >= 0 { + b = b[i+13:] + return string(b[:bytes.IndexByte(b, '"')]), nil + } + return "", errors.New("PRETTY_NAME not found") +} + +// IsContainerized returns true if we are running inside a container. +func IsContainerized() (bool, error) { + b, err := ioutil.ReadFile(proc1Cgroup) + if err != nil { + return false, err + } + for _, line := range bytes.Split(b, []byte{'\n'}) { + if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) { + return true, nil + } + } + return false, nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_test.go b/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_test.go new file mode 100644 index 00000000..b7d54cbb --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_test.go @@ -0,0 +1,124 @@ +package operatingsystem + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestGetOperatingSystem(t *testing.T) { + var ( + backup = etcOsRelease + ubuntuTrusty = []byte(`NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 14.04 LTS" +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) + gentoo = []byte(`NAME=Gentoo +ID=gentoo +PRETTY_NAME="Gentoo/Linux" +ANSI_COLOR="1;32" +HOME_URL="http://www.gentoo.org/" +SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" +BUG_REPORT_URL="https://bugs.gentoo.org/" +`) + noPrettyName = []byte(`NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) + ) + + dir := os.TempDir() + etcOsRelease = filepath.Join(dir, "etcOsRelease") + + defer func() { + os.Remove(etcOsRelease) + etcOsRelease = backup + }() + + for expect, osRelease := range map[string][]byte{ + "Ubuntu 14.04 LTS": ubuntuTrusty, + "Gentoo/Linux": gentoo, + "": noPrettyName, + } { + if err := ioutil.WriteFile(etcOsRelease, osRelease, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if s != expect { + if expect == "" { + t.Fatalf("Expected error 'PRETTY_NAME not found', but got %v", err) + } else { + t.Fatalf("Expected '%s', but got '%s'. 
Err=%v", expect, s, err) + } + } + } +} + +func TestIsContainerized(t *testing.T) { + var ( + backup = proc1Cgroup + nonContainerizedProc1Cgroup = []byte(`14:name=systemd:/ +13:hugetlb:/ +12:net_prio:/ +11:perf_event:/ +10:bfqio:/ +9:blkio:/ +8:net_cls:/ +7:freezer:/ +6:devices:/ +5:memory:/ +4:cpuacct:/ +3:cpu:/ +2:cpuset:/ +`) + containerizedProc1Cgroup = []byte(`9:perf_event:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +8:blkio:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +7:net_cls:/ +6:freezer:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +5:devices:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +4:memory:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +3:cpuacct:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +2:cpu:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +1:cpuset:/`) + ) + + dir := os.TempDir() + proc1Cgroup = filepath.Join(dir, "proc1Cgroup") + + defer func() { + os.Remove(proc1Cgroup) + proc1Cgroup = backup + }() + + if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err := IsContainerized() + if err != nil { + t.Fatal(err) + } + if inContainer { + t.Fatal("Wrongly assuming containerized") + } + + if err := ioutil.WriteFile(proc1Cgroup, containerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err = IsContainerized() + if err != nil { + t.Fatal(err) + } + if !inContainer { + t.Fatal("Wrongly assuming non-containerized") + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go b/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go new file mode 100644 index 00000000..3c86b6af --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go @@ -0,0 +1,49 @@ +package operatingsystem + +import ( + "syscall" + "unsafe" +) + +// See https://code.google.com/p/go/source/browse/src/pkg/mime/type_windows.go?r=d14520ac25bf6940785aabb71f5be453a286f58c +// for a similar sample + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + + var h syscall.Handle + + // Default return value + ret := "Unknown Operating System" + + if err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, + syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + syscall.KEY_READ, + &h); err != nil { + return ret, err + } + defer syscall.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err := syscall.RegQueryValueEx(h, + syscall.StringToUTF16Ptr("ProductName"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return ret, err + } + ret = syscall.UTF16ToString(buf[:]) + + return ret, nil +} + +// IsContainerized returns true if we are running inside a container. +// No-op on Windows, always returns false. 
+func IsContainerized() (bool, error) { + return false, nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/parsers.go b/vendor/src/github.com/docker/docker/pkg/parsers/parsers.go new file mode 100644 index 00000000..30b19329 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/parsers.go @@ -0,0 +1,198 @@ +// Package parsers provides helper functions to parse and validate different type +// of string. It can be hosts, unix addresses, tcp addresses, filters, kernel +// operating system versions. +package parsers + +import ( + "fmt" + "net/url" + "path" + "runtime" + "strconv" + "strings" +) + +// ParseDockerDaemonHost parses the specified address and returns an address that will be used as the host. +// Depending of the address specified, will use the defaultTCPAddr or defaultUnixAddr +// defaultUnixAddr must be a absolute file path (no `unix://` prefix) +// defaultTCPAddr must be the full `tcp://host:port` form +func ParseDockerDaemonHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) { + addr = strings.TrimSpace(addr) + if addr == "" { + if runtime.GOOS != "windows" { + return fmt.Sprintf("unix://%s", defaultUnixAddr), nil + } + return defaultTCPAddr, nil + } + addrParts := strings.Split(addr, "://") + if len(addrParts) == 1 { + addrParts = []string{"tcp", addrParts[0]} + } + + switch addrParts[0] { + case "tcp": + return ParseTCPAddr(addrParts[1], defaultTCPAddr) + case "unix": + return ParseUnixAddr(addrParts[1], defaultUnixAddr) + case "fd": + return addr, nil + default: + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } +} + +// ParseUnixAddr parses and validates that the specified address is a valid UNIX +// socket address. It returns a formatted UNIX socket address, either using the +// address parsed from addr, or the contents of defaultAddr if addr is a blank +// string. +func ParseUnixAddr(addr string, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, "unix://") + if strings.Contains(addr, "://") { + return "", fmt.Errorf("Invalid proto, expected unix: %s", addr) + } + if addr == "" { + addr = defaultAddr + } + return fmt.Sprintf("unix://%s", addr), nil +} + +// ParseTCPAddr parses and validates that the specified address is a valid TCP +// address. It returns a formatted TCP address, either using the address parsed +// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. 
+// tryAddr is expected to have already been Trim()'d +// defaultAddr must be in the full `tcp://host:port` form +func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { + if tryAddr == "" || tryAddr == "tcp://" { + return defaultAddr, nil + } + addr := strings.TrimPrefix(tryAddr, "tcp://") + if strings.Contains(addr, "://") || addr == "" { + return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) + } + + u, err := url.Parse("tcp://" + addr) + if err != nil { + return "", err + } + hostParts := strings.Split(u.Host, ":") + if len(hostParts) != 2 { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + defaults := strings.Split(defaultAddr, ":") + if len(defaults) != 3 { + return "", fmt.Errorf("Invalid defaults address format: %s", defaultAddr) + } + + host := hostParts[0] + if host == "" { + host = strings.TrimPrefix(defaults[1], "//") + } + if hostParts[1] == "" { + hostParts[1] = defaults[2] + } + p, err := strconv.Atoi(hostParts[1]) + if err != nil && p == 0 { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + return fmt.Sprintf("tcp://%s:%d%s", host, p, u.Path), nil +} + +// ParseRepositoryTag gets a repos name and returns the right reposName + tag|digest +// The tag can be confusing because of a port in a repository name. +// Ex: localhost.localdomain:5000/samalba/hipache:latest +// Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb +func ParseRepositoryTag(repos string) (string, string) { + n := strings.Index(repos, "@") + if n >= 0 { + parts := strings.Split(repos, "@") + return parts[0], parts[1] + } + n = strings.LastIndex(repos, ":") + if n < 0 { + return repos, "" + } + if tag := repos[n+1:]; !strings.Contains(tag, "/") { + return repos[:n], tag + } + return repos, "" +} + +// PartParser parses and validates the specified string (data) using the specified template +// e.g. ip:public:private -> 192.168.0.1:80:8000 +func PartParser(template, data string) (map[string]string, error) { + // ip:public:private + var ( + templateParts = strings.Split(template, ":") + parts = strings.Split(data, ":") + out = make(map[string]string, len(templateParts)) + ) + if len(parts) != len(templateParts) { + return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) + } + + for i, t := range templateParts { + value := "" + if len(parts) > i { + value = parts[i] + } + out[t] = value + } + return out, nil +} + +// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} + +// ParsePortRange parses and validates the specified string as a port-range (8000-9000) +func ParsePortRange(ports string) (uint64, uint64, error) { + if ports == "" { + return 0, 0, fmt.Errorf("Empty string specified for ports.") + } + if !strings.Contains(ports, "-") { + start, err := strconv.ParseUint(ports, 10, 16) + end := start + return start, end, err + } + + parts := strings.Split(ports, "-") + start, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return 0, 0, err + } + end, err := strconv.ParseUint(parts[1], 10, 16) + if err != nil { + return 0, 0, err + } + if end < start { + return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) + } + return start, end, nil +} + +// ParseLink parses and validates the specified string as a link format (name:alias) +func ParseLink(val string) (string, string, error) { + if val == "" { + return "", "", fmt.Errorf("empty string specified for links") + } + arr := strings.Split(val, ":") + if len(arr) > 2 { + return "", "", fmt.Errorf("bad format for links: %s", val) + } + if len(arr) == 1 { + return val, val, nil + } + // This is kept because we can actually get an HostConfig with links + // from an already created container and the format is not `foo:bar` + // but `/foo:/c1/bar` + if strings.HasPrefix(arr[0], "/") { + _, alias := path.Split(arr[1]) + return arr[0][1:], alias, nil + } + return arr[0], arr[1], nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/parsers_test.go b/vendor/src/github.com/docker/docker/pkg/parsers/parsers_test.go new file mode 100644 index 00000000..d83722e8 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/parsers_test.go @@ -0,0 +1,240 @@ +package parsers + +import ( + "runtime" + "strings" + "testing" +) + +func TestParseDockerDaemonHost(t *testing.T) { + var ( + defaultHTTPHost = "tcp://127.0.0.1:2376" + defaultUnix = "/var/run/docker.sock" + defaultHOST = "unix:///var/run/docker.sock" + ) + if runtime.GOOS == "windows" { + defaultHOST = defaultHTTPHost + } + invalids := map[string]string{ + "0.0.0.0": "Invalid bind address format: 0.0.0.0", + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375", + "tcp://unix:///run/docker.sock": "Invalid bind address format: unix", + "tcp": "Invalid bind address format: tcp", + "unix": "Invalid bind address format: unix", + "fd": "Invalid bind address format: fd", + } + valids := map[string]string{ + "0.0.0.1:": "tcp://0.0.0.1:2376", + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + ":6666": "tcp://127.0.0.1:6666", + ":6666/path": "tcp://127.0.0.1:6666/path", + "": defaultHOST, + " ": defaultHOST, + " ": defaultHOST, + "tcp://": defaultHTTPHost, + "tcp://:7777": "tcp://127.0.0.1:7777", + 
"tcp://:7777/path": "tcp://127.0.0.1:7777/path", + " tcp://:7777/path ": "tcp://127.0.0.1:7777/path", + "unix:///run/docker.sock": "unix:///run/docker.sock", + "unix://": "unix:///var/run/docker.sock", + "fd://": "fd://", + "fd://something": "fd://something", + } + for invalidAddr, expectedError := range invalids { + if addr, err := ParseDockerDaemonHost(defaultHTTPHost, defaultUnix, invalidAddr); err == nil || err.Error() != expectedError { + t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) + } + } + for validAddr, expectedAddr := range valids { + if addr, err := ParseDockerDaemonHost(defaultHTTPHost, defaultUnix, validAddr); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got (%v) addr (%v)", validAddr, expectedAddr, err, addr) + } + } +} + +func TestParseTCP(t *testing.T) { + var ( + defaultHTTPHost = "tcp://127.0.0.1:2376" + ) + invalids := map[string]string{ + "0.0.0.0": "Invalid bind address format: 0.0.0.0", + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid proto, expected tcp: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid proto, expected tcp: udp://127.0.0.1:2375", + } + valids := map[string]string{ + "": defaultHTTPHost, + "tcp://": defaultHTTPHost, + "0.0.0.1:": "tcp://0.0.0.1:2376", + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + ":6666": "tcp://127.0.0.1:6666", + ":6666/path": "tcp://127.0.0.1:6666/path", + "tcp://:7777": "tcp://127.0.0.1:7777", + "tcp://:7777/path": "tcp://127.0.0.1:7777/path", + } + for invalidAddr, expectedError := range invalids { + if addr, err := ParseTCPAddr(invalidAddr, defaultHTTPHost); err == nil || err.Error() != expectedError { + t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) + } + } + for validAddr, expectedAddr := range valids { + if addr, err := ParseTCPAddr(validAddr, defaultHTTPHost); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got %v and addr %v", validAddr, expectedAddr, err, addr) + } + } +} + +func TestParseInvalidUnixAddrInvalid(t *testing.T) { + if _, err := ParseUnixAddr("tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { + t.Fatalf("Expected an error, got %v", err) + } + if _, err := ParseUnixAddr("unix://tcp://127.0.0.1", "/var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { + t.Fatalf("Expected an error, got %v", err) + } + if v, err := ParseUnixAddr("", "/var/run/docker.sock"); err != nil || v != "unix:///var/run/docker.sock" { + t.Fatalf("Expected an %v, got %v", v, "unix:///var/run/docker.sock") + } +} + +func TestParseRepositoryTag(t *testing.T) { + if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag) + } + if repo, digest := ParseRepositoryTag("root@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "root" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { + t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and 
'%s'", "root", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) + } + if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag) + } + if repo, digest := ParseRepositoryTag("user/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "user/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { + t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "user/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) + } + if repo, tag := ParseRepositoryTag("url:5000/repo"); repo != "url:5000/repo" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag) + } + if repo, digest := ParseRepositoryTag("url:5000/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "url:5000/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { + t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "url:5000/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) + } +} + +func TestParseKeyValueOpt(t *testing.T) { + invalids := map[string]string{ + "": "Unable to parse key/value option: ", + "key": "Unable to parse key/value option: key", + } + for invalid, expectedError := range invalids { + if _, _, err := ParseKeyValueOpt(invalid); err == nil || err.Error() != expectedError { + t.Fatalf("Expected error %v for %v, got %v", expectedError, invalid, err) + } + } + valids := map[string][]string{ + "key=value": {"key", "value"}, + " key = value ": {"key", "value"}, + "key=value1=value2": {"key", "value1=value2"}, + " key = value1 = value2 ": {"key", "value1 = value2"}, + } + for valid, expectedKeyValue := range valids { + key, value, err := ParseKeyValueOpt(valid) + if err != nil { + t.Fatal(err) + } + if key != expectedKeyValue[0] || value != expectedKeyValue[1] { + t.Fatalf("Expected {%v: %v} got {%v: %v}", expectedKeyValue[0], expectedKeyValue[1], key, value) + } + } +} + +func TestParsePortRange(t *testing.T) { + if start, end, err := ParsePortRange("8000-8080"); err != nil || start != 8000 || end != 8080 { + t.Fatalf("Error: %s or Expecting {start,end} values {8000,8080} but found {%d,%d}.", err, start, end) + } +} + +func TestParsePortRangeEmpty(t *testing.T) { + if _, _, err := ParsePortRange(""); err == nil || err.Error() != "Empty string specified for ports." 
{ + t.Fatalf("Expected error 'Empty string specified for ports.', got %v", err) + } +} + +func TestParsePortRangeWithNoRange(t *testing.T) { + start, end, err := ParsePortRange("8080") + if err != nil { + t.Fatal(err) + } + if start != 8080 || end != 8080 { + t.Fatalf("Expected start and end to be the same and equal to 8080, but were %v and %v", start, end) + } +} + +func TestParsePortRangeIncorrectRange(t *testing.T) { + if _, _, err := ParsePortRange("9000-8080"); err == nil || !strings.Contains(err.Error(), "Invalid range specified for the Port") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } +} + +func TestParsePortRangeIncorrectEndRange(t *testing.T) { + if _, _, err := ParsePortRange("8000-a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } + + if _, _, err := ParsePortRange("8000-30a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } +} + +func TestParsePortRangeIncorrectStartRange(t *testing.T) { + if _, _, err := ParsePortRange("a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } + + if _, _, err := ParsePortRange("30a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } +} + +func TestParseLink(t *testing.T) { + name, alias, err := ParseLink("name:alias") + if err != nil { + t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err) + } + if name != "name" { + t.Fatalf("Link name should have been name, got %s instead", name) + } + if alias != "alias" { + t.Fatalf("Link alias should have been alias, got %s instead", alias) + } + // short format definition + name, alias, err = ParseLink("name") + if err != nil { + t.Fatalf("Expected not to error out on a valid name only format but got: %v", err) + } + if name != "name" { + t.Fatalf("Link name should have been name, got %s instead", name) + } + if alias != "name" { + t.Fatalf("Link alias should have been name, got %s instead", alias) + } + // empty string link definition is not allowed + if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") { + t.Fatalf("Expected error 'empty string specified for links' but got: %v", err) + } + // more than two colons are not allowed + if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") { + t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err) + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/builder_context.go b/vendor/src/github.com/docker/docker/pkg/tarsum/builder_context.go new file mode 100644 index 00000000..b42983e9 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/builder_context.go @@ -0,0 +1,21 @@ +package tarsum + +// BuilderContext is an interface extending TarSum by adding the Remove method. 
+// In general there was concern about adding this method to TarSum itself +// so instead it is being added just to "BuilderContext" which will then +// only be used during the .dockerignore file processing +// - see builder/evaluator.go +type BuilderContext interface { + TarSum + Remove(string) +} + +func (bc *tarSum) Remove(filename string) { + for i, fis := range bc.sums { + if fis.Name() == filename { + bc.sums = append(bc.sums[:i], bc.sums[i+1:]...) + // Note, we don't just return because there could be + // more than one with this name + } + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/builder_context_test.go b/vendor/src/github.com/docker/docker/pkg/tarsum/builder_context_test.go new file mode 100644 index 00000000..719f7289 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/builder_context_test.go @@ -0,0 +1,63 @@ +package tarsum + +import ( + "io" + "io/ioutil" + "os" + "testing" +) + +// Try to remove tarsum (in the BuilderContext) that do not exists, won't change a thing +func TestTarSumRemoveNonExistent(t *testing.T) { + filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar" + reader, err := os.Open(filename) + if err != nil { + t.Fatal(err) + } + ts, err := NewTarSum(reader, false, Version0) + if err != nil { + t.Fatal(err) + } + + // Read and discard bytes so that it populates sums + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to read from %s: %s", filename, err) + } + + expected := len(ts.GetSums()) + + ts.(BuilderContext).Remove("") + ts.(BuilderContext).Remove("Anything") + + if len(ts.GetSums()) != expected { + t.Fatalf("Expected %v sums, go %v.", expected, ts.GetSums()) + } +} + +// Remove a tarsum (in the BuilderContext) +func TestTarSumRemove(t *testing.T) { + filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar" + reader, err := os.Open(filename) + if err != nil { + t.Fatal(err) + } + ts, err := NewTarSum(reader, false, Version0) + if err != nil { + t.Fatal(err) + } + + // Read and discard bytes so that it populates sums + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to read from %s: %s", filename, err) + } + + expected := len(ts.GetSums()) - 1 + + ts.(BuilderContext).Remove("etc/sudoers") + + if len(ts.GetSums()) != expected { + t.Fatalf("Expected %v sums, go %v.", expected, len(ts.GetSums())) + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go b/vendor/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go new file mode 100644 index 00000000..7c2161c2 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go @@ -0,0 +1,126 @@ +package tarsum + +import "sort" + +// FileInfoSumInterface provides an interface for accessing file checksum +// information within a tar file. This info is accessed through interface +// so the actual name and sum cannot be medled with. +type FileInfoSumInterface interface { + // File name + Name() string + // Checksum of this particular file and its headers + Sum() string + // Position of file in the tar + Pos() int64 +} + +type fileInfoSum struct { + name string + sum string + pos int64 +} + +func (fis fileInfoSum) Name() string { + return fis.name +} +func (fis fileInfoSum) Sum() string { + return fis.sum +} +func (fis fileInfoSum) Pos() int64 { + return fis.pos +} + +// FileInfoSums provides a list of FileInfoSumInterfaces. 
+type FileInfoSums []FileInfoSumInterface + +// GetFile returns the first FileInfoSumInterface with a matching name. +func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface { + for i := range fis { + if fis[i].Name() == name { + return fis[i] + } + } + return nil +} + +// GetAllFile returns a FileInfoSums with all matching names. +func (fis FileInfoSums) GetAllFile(name string) FileInfoSums { + f := FileInfoSums{} + for i := range fis { + if fis[i].Name() == name { + f = append(f, fis[i]) + } + } + return f +} + +// GetDuplicatePaths returns a FileInfoSums with all duplicated paths. +func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) { + seen := make(map[string]int, len(fis)) // allocate earl. no need to grow this map. + for i := range fis { + f := fis[i] + if _, ok := seen[f.Name()]; ok { + dups = append(dups, f) + } else { + seen[f.Name()] = 0 + } + } + return dups +} + +// Len returns the size of the FileInfoSums. +func (fis FileInfoSums) Len() int { return len(fis) } + +// Swap swaps two FileInfoSum values if a FileInfoSums list. +func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] } + +// SortByPos sorts FileInfoSums content by position. +func (fis FileInfoSums) SortByPos() { + sort.Sort(byPos{fis}) +} + +// SortByNames sorts FileInfoSums content by name. +func (fis FileInfoSums) SortByNames() { + sort.Sort(byName{fis}) +} + +// SortBySums sorts FileInfoSums content by sums. +func (fis FileInfoSums) SortBySums() { + dups := fis.GetDuplicatePaths() + if len(dups) > 0 { + sort.Sort(bySum{fis, dups}) + } else { + sort.Sort(bySum{fis, nil}) + } +} + +// byName is a sort.Sort helper for sorting by file names. +// If names are the same, order them by their appearance in the tar archive +type byName struct{ FileInfoSums } + +func (bn byName) Less(i, j int) bool { + if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { + return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() + } + return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() +} + +// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive +type bySum struct { + FileInfoSums + dups FileInfoSums +} + +func (bs bySum) Less(i, j int) bool { + if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { + return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() + } + return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() +} + +// byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order +type byPos struct{ FileInfoSums } + +func (bp byPos) Less(i, j int) bool { + return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() +} diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go b/vendor/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go new file mode 100644 index 00000000..bb700d8b --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go @@ -0,0 +1,62 @@ +package tarsum + +import "testing" + +func newFileInfoSums() FileInfoSums { + return FileInfoSums{ + fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2}, + fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5}, + fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0}, + fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3}, + fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4}, + fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1}, + } +} + +func TestSortFileInfoSums(t *testing.T) { + dups := newFileInfoSums().GetAllFile("dup1") + if 
len(dups) != 2 { + t.Errorf("expected length 2, got %d", len(dups)) + } + dups.SortByNames() + if dups[0].Pos() != 4 { + t.Errorf("sorted dups should be ordered by position. Expected 4, got %d", dups[0].Pos()) + } + + fis := newFileInfoSums() + expected := "0abcdef1234567890" + fis.SortBySums() + got := fis[0].Sum() + if got != expected { + t.Errorf("Expected %q, got %q", expected, got) + } + + fis = newFileInfoSums() + expected = "dup1" + fis.SortByNames() + gotFis := fis[0] + if gotFis.Name() != expected { + t.Errorf("Expected %q, got %q", expected, gotFis.Name()) + } + // since a duplicate is first, ensure it is ordered first by position too + if gotFis.Pos() != 4 { + t.Errorf("Expected %d, got %d", 4, gotFis.Pos()) + } + + fis = newFileInfoSums() + fis.SortByPos() + if fis[0].Pos() != 0 { + t.Errorf("sorted fileInfoSums by Pos should order them by position.") + } + + fis = newFileInfoSums() + expected = "deadbeef1" + gotFileInfoSum := fis.GetFile("dup1") + if gotFileInfoSum.Sum() != expected { + t.Errorf("Expected %q, got %q", expected, gotFileInfoSum) + } + if fis.GetFile("noPresent") != nil { + t.Errorf("Should have return nil if name not found.") + } + +} diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/tarsum.go b/vendor/src/github.com/docker/docker/pkg/tarsum/tarsum.go new file mode 100644 index 00000000..d2df58c7 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/tarsum.go @@ -0,0 +1,294 @@ +// Package tarsum provides algorithms to perform checksum calculation on +// filesystem layers. +// +// The transportation of filesystems, regarding Docker, is done with tar(1) +// archives. There are a variety of tar serialization formats [2], and a key +// concern here is ensuring a repeatable checksum given a set of inputs from a +// generic tar archive. Types of transportation include distribution to and from a +// registry endpoint, saving and loading through commands or Docker daemon APIs, +// transferring the build context from client to Docker daemon, and committing the +// filesystem of a container to become an image. +// +// As tar archives are used for transit, but not preserved in many situations, the +// focus of the algorithm is to ensure the integrity of the preserved filesystem, +// while maintaining a deterministic accountability. This includes neither +// constraining the ordering or manipulation of the files during the creation or +// unpacking of the archive, nor include additional metadata state about the file +// system attributes. +package tarsum + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "hash" + "io" + "strings" +) + +const ( + buf8K = 8 * 1024 + buf16K = 16 * 1024 + buf32K = 32 * 1024 +) + +// NewTarSum creates a new interface for calculating a fixed time checksum of a +// tar archive. +// +// This is used for calculating checksums of layers of an image, in some cases +// including the byte payload of the image's json metadata as well, and for +// calculating the checksums for buildcache. +func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { + return NewTarSumHash(r, dc, v, DefaultTHash) +} + +// NewTarSumHash creates a new TarSum, providing a THash to use rather than +// the DefaultTHash. 
+func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { + headerSelector, err := getTarHeaderSelector(v) + if err != nil { + return nil, err + } + ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} + err = ts.initTarSum() + return ts, err +} + +// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label. +func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) { + parts := strings.SplitN(label, "+", 2) + if len(parts) != 2 { + return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}") + } + + versionName, hashName := parts[0], parts[1] + + version, ok := tarSumVersionsByName[versionName] + if !ok { + return nil, fmt.Errorf("unknown TarSum version name: %q", versionName) + } + + hashConfig, ok := standardHashConfigs[hashName] + if !ok { + return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName) + } + + tHash := NewTHash(hashConfig.name, hashConfig.hash.New) + + return NewTarSumHash(r, disableCompression, version, tHash) +} + +// TarSum is the generic interface for calculating fixed time +// checksums of a tar archive. +type TarSum interface { + io.Reader + GetSums() FileInfoSums + Sum([]byte) string + Version() Version + Hash() THash +} + +// tarSum struct is the structure for a Version0 checksum calculation. +type tarSum struct { + io.Reader + tarR *tar.Reader + tarW *tar.Writer + writer writeCloseFlusher + bufTar *bytes.Buffer + bufWriter *bytes.Buffer + bufData []byte + h hash.Hash + tHash THash + sums FileInfoSums + fileCounter int64 + currentFile string + finished bool + first bool + DisableCompression bool // false by default. When false, the output gzip compressed. + tarSumVersion Version // this field is not exported so it can not be mutated during use + headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive +} + +func (ts tarSum) Hash() THash { + return ts.tHash +} + +func (ts tarSum) Version() Version { + return ts.tarSumVersion +} + +// THash provides a hash.Hash type generator and its name. +type THash interface { + Hash() hash.Hash + Name() string +} + +// NewTHash is a convenience method for creating a THash. +func NewTHash(name string, h func() hash.Hash) THash { + return simpleTHash{n: name, h: h} +} + +type tHashConfig struct { + name string + hash crypto.Hash +} + +var ( + // NOTE: DO NOT include MD5 or SHA1, which are considered insecure. 
+ standardHashConfigs = map[string]tHashConfig{ + "sha256": {name: "sha256", hash: crypto.SHA256}, + "sha512": {name: "sha512", hash: crypto.SHA512}, + } +) + +// TarSum default is "sha256" +var DefaultTHash = NewTHash("sha256", sha256.New) + +type simpleTHash struct { + n string + h func() hash.Hash +} + +func (sth simpleTHash) Name() string { return sth.n } +func (sth simpleTHash) Hash() hash.Hash { return sth.h() } + +func (ts *tarSum) encodeHeader(h *tar.Header) error { + for _, elem := range ts.headerSelector.selectHeaders(h) { + if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { + return err + } + } + return nil +} + +func (ts *tarSum) initTarSum() error { + ts.bufTar = bytes.NewBuffer([]byte{}) + ts.bufWriter = bytes.NewBuffer([]byte{}) + ts.tarR = tar.NewReader(ts.Reader) + ts.tarW = tar.NewWriter(ts.bufTar) + if !ts.DisableCompression { + ts.writer = gzip.NewWriter(ts.bufWriter) + } else { + ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} + } + if ts.tHash == nil { + ts.tHash = DefaultTHash + } + ts.h = ts.tHash.Hash() + ts.h.Reset() + ts.first = true + ts.sums = FileInfoSums{} + return nil +} + +func (ts *tarSum) Read(buf []byte) (int, error) { + if ts.finished { + return ts.bufWriter.Read(buf) + } + if len(ts.bufData) < len(buf) { + switch { + case len(buf) <= buf8K: + ts.bufData = make([]byte, buf8K) + case len(buf) <= buf16K: + ts.bufData = make([]byte, buf16K) + case len(buf) <= buf32K: + ts.bufData = make([]byte, buf32K) + default: + ts.bufData = make([]byte, len(buf)) + } + } + buf2 := ts.bufData[:len(buf)] + + n, err := ts.tarR.Read(buf2) + if err != nil { + if err == io.EOF { + if _, err := ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + if !ts.first { + ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) + ts.fileCounter++ + ts.h.Reset() + } else { + ts.first = false + } + + currentHeader, err := ts.tarR.Next() + if err != nil { + if err == io.EOF { + if err := ts.tarW.Close(); err != nil { + return 0, err + } + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + if err := ts.writer.Close(); err != nil { + return 0, err + } + ts.finished = true + return n, nil + } + return n, err + } + ts.currentFile = strings.TrimSuffix(strings.TrimPrefix(currentHeader.Name, "./"), "/") + if err := ts.encodeHeader(currentHeader); err != nil { + return 0, err + } + if err := ts.tarW.WriteHeader(currentHeader); err != nil { + return 0, err + } + if _, err := ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) + } + return n, err + } + + // Filling the hash buffer + if _, err = ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + + // Filling the tar writter + if _, err = ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + + // Filling the output writer + if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) +} + +func (ts *tarSum) Sum(extra []byte) string { + ts.sums.SortBySums() + h := ts.tHash.Hash() + if extra != nil { + h.Write(extra) + } + for _, fis := range ts.sums { + h.Write([]byte(fis.Sum())) + } + checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) + return checksum +} + +func (ts *tarSum) GetSums() FileInfoSums { + return ts.sums +} diff --git 
a/vendor/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md b/vendor/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md
new file mode 100644
index 00000000..77927ee7
--- /dev/null
+++ b/vendor/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md
@@ -0,0 +1,230 @@
+page_title: TarSum checksum specification
+page_description: Documentation for algorithms used in the TarSum checksum calculation
+page_keywords: docker, checksum, validation, tarsum
+
+# TarSum Checksum Specification
+
+## Abstract
+
+This document describes the algorithms used in performing the TarSum checksum
+calculation on filesystem layers, the need for this method over existing
+methods, and the versioning of this calculation.
+
+## Warning
+
+This checksum algorithm is for best-effort comparison of file trees with fuzzy logic.
+
+This is _not_ a cryptographic attestation, and should not be considered secure.
+
+## Introduction
+
+The transportation of filesystems, regarding Docker, is done with tar(1)
+archives. There are a variety of tar serialization formats [2], and a key
+concern here is ensuring a repeatable checksum given a set of inputs from a
+generic tar archive. Types of transportation include distribution to and from a
+registry endpoint, saving and loading through commands or Docker daemon APIs,
+transferring the build context from client to Docker daemon, and committing the
+filesystem of a container to become an image.
+
+As tar archives are used for transit, but not preserved in many situations, the
+focus of the algorithm is to ensure the integrity of the preserved filesystem,
+while maintaining deterministic accountability. The algorithm neither
+constrains the ordering or manipulation of the files during the creation or
+unpacking of the archive, nor includes additional metadata about the
+filesystem attributes.
+
+## Intended Audience
+
+This document outlines the methods used for consistent checksum calculation
+for filesystems transported via tar archives.
+
+Auditing these methodologies is an open and iterative process. This document
+should accommodate the review of source code. Ultimately, this document should
+be the starting point of further refinements to the algorithm and its future
+versions.
+
+## Concept
+
+The checksum mechanism must ensure the integrity of the filesystem payload.
+
+## Checksum Algorithm Profile
+
+A checksum mechanism must define the following operations and attributes:
+
+* Associated hashing cipher - used to checksum each file payload and attribute
+  information.
+* Checksum list - each file of the filesystem archive has its checksum
+  calculated from the payload and attributes of the file. The final checksum is
+  calculated from this list, with specific ordering.
+* Version - as the algorithm adapts to requirements, changes in its behavior
+  are managed by versioning.
+* Archive being calculated - the tar archive having its checksum calculated
+
+## Elements of TarSum checksum
+
+The calculated sum output is a text string. The elements included in the output
+of the calculated sum comprise the information needed for validation of the sum
+(TarSum version and hashing cipher used) and the expected checksum in hexadecimal
+form.
+
+There are two delimiters used:
+* '+' separates TarSum version from hashing cipher
+* ':' separates calculation mechanics from expected hash
+
+Example:
+
+```
+	"tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"
+	|         |       \                                                               |
+	|         |        \                                                              |
+	|_version_|_cipher__|__                                                           |
+	|                      \                                                          |
+	|_calculation_mechanics_|______________________expected_sum_______________________|
+```
+
+## Versioning
+
+Versioning was introduced [0] to accommodate differences in the calculation
+needed, and the ability to maintain backward compatibility.
+
+The general algorithm is described further in the 'Calculation' section.
+
+### Version0
+
+This is the initial version of TarSum.
+
+Its element in the TarSum checksum string is `tarsum`.
+
+### Version1
+
+Its element in the TarSum checksum is `tarsum.v1`.
+
+The notable changes in this version:
+* Exclusion of file `mtime` from the file information headers, in each file
+  checksum calculation
+* Inclusion of extended attributes (`xattrs`, also seen as `SCHILY.xattr.`
+  prefixed Pax tar file info headers) keys and values in each file checksum
+  calculation
+
+### VersionDev
+
+*Do not use unless validating refinements to the checksum algorithm*
+
+Its element in the TarSum checksum is `tarsum.dev`.
+
+This is a floating placeholder for the next version and grounds for testing
+changes. The methods used for calculation are subject to change without notice,
+and this version is for testing and not for production use.
+
+## Ciphers
+
+The official default and standard hashing cipher used in the calculation
+mechanic is `sha256`. This refers to the SHA256 hash algorithm as defined in
+FIPS 180-4.
+
+Though the TarSum algorithm itself is not exclusively bound to the single
+hashing cipher `sha256`, support for alternate hashing ciphers was later added
+[1]. Use cases for an alternate cipher could include future-proofing the TarSum
+checksum format and using faster cipher hashes for tar filesystem checksums.
+
+## Calculation
+
+### Requirement
+
+As mentioned earlier, the calculation takes into consideration the lifecycle of
+the tar archive, in that the tar archive is not an immutable, permanent
+artifact. Otherwise, options like relying on a known hashing cipher checksum of
+the archive itself would be reliable enough. The tar archive of the filesystem
+is used as a transportation medium for Docker images, and the archive is
+discarded once its contents are extracted. Therefore, for consistent
+validation, items such as the order of files in the tar archive and timestamps
+are subject to change once an image is received.
+
+### Process
+
+The method is typically iterative due to reading tar info headers from the
+archive stream, though this is not a strict requirement.
+
+#### Files
+
+Each file in the tar archive has its contents (headers and body) checksummed
+individually using the designated associated hashing cipher. The ordered
+headers of the file are written to the checksum calculation first, and then the
+payload of the file body.
+
+The resulting checksum of the file is appended to the list of file sums. The
+sum is encoded as a string of the hexadecimal digest. Additionally, the file
+name and position in the archive are kept as a reference for special ordering.
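+
+To make this step concrete, the sketch below hashes a single file the way
+Version0 does: ordered headers first, then the body. It is illustrative only
+and not part of the package API; the names `orderedV0Headers` and `fileSum`
+are ours, though the header order mirrors the package's v0 header selector
+and the Headers section that follows.
+
+```go
+package tarsumexample
+
+import (
+	"archive/tar"
+	"crypto/sha256"
+	"encoding/hex"
+	"io"
+	"strconv"
+)
+
+// orderedV0Headers mirrors the Version0 header selection listed in the
+// Headers section below.
+func orderedV0Headers(h *tar.Header) [][2]string {
+	return [][2]string{
+		{"name", h.Name},
+		{"mode", strconv.FormatInt(h.Mode, 10)},
+		{"uid", strconv.Itoa(h.Uid)},
+		{"gid", strconv.Itoa(h.Gid)},
+		{"size", strconv.FormatInt(h.Size, 10)},
+		{"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)},
+		{"typeflag", string([]byte{h.Typeflag})},
+		{"linkname", h.Linkname},
+		{"uname", h.Uname},
+		{"gname", h.Gname},
+		{"devmajor", strconv.FormatInt(h.Devmajor, 10)},
+		{"devminor", strconv.FormatInt(h.Devminor, 10)},
+	}
+}
+
+// fileSum writes the ordered headers as "{key}{value}" pairs, then the file
+// body, and returns the hex digest that is appended to the list of file sums.
+func fileSum(h *tar.Header, body io.Reader) (string, error) {
+	d := sha256.New()
+	for _, kv := range orderedV0Headers(h) {
+		d.Write([]byte(kv[0] + kv[1]))
+	}
+	if _, err := io.Copy(d, body); err != nil {
+		return "", err
+	}
+	return hex.EncodeToString(d.Sum(nil)), nil
+}
+```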
+
+#### Headers
+
+The following headers are read, in this order (with the corresponding
+representation of their values):
+* 'name' - string
+* 'mode' - string of the base10 integer
+* 'uid' - string of the integer
+* 'gid' - string of the integer
+* 'size' - string of the integer
+* 'mtime' (_Version0 only_) - string of integer of the seconds since 1970-01-01 00:00:00 UTC
+* 'typeflag' - string of the char
+* 'linkname' - string
+* 'uname' - string
+* 'gname' - string
+* 'devmajor' - string of the integer
+* 'devminor' - string of the integer
+
+For >= Version1, the extended attribute headers ("SCHILY.xattr." prefixed pax
+headers) are included after the above list. These xattr key/value pairs are
+first sorted by key.
+
+#### Header Format
+
+The ordered headers are written to the hash in the format of
+
+    "{.key}{.value}"
+
+with no newline.
+
+#### Body
+
+After the ordered headers of the file have been added to the checksum for the
+file, the body of the file is written to the hash.
+
+#### List of file sums
+
+The list of file sums is sorted by the string of the hexadecimal digest.
+
+If there are two files in the tar with matching paths, the order of occurrence
+for that path is reflected for the sums of the corresponding file header and
+body.
+
+#### Final Checksum
+
+Begin with a fresh or initial state of the associated hash cipher. If there is
+additional payload to include in the TarSum calculation for the archive, it is
+written first. Then each checksum from the ordered list of file sums is written
+to the hash.
+
+The resulting digest is formatted per the Elements of TarSum checksum section
+above, including the TarSum version, the associated hash cipher and the
+hexadecimal encoded checksum digest.
+
+## Security Considerations
+
+The initial version of TarSum has undergone one update that could invalidate
+handcrafted tar archives. The tar archive format supports appending files with
+the same name as prior files in the archive. The later file will clobber the
+prior file of the same path. Due to this, the algorithm now accounts for files
+with matching paths, and orders the list of file sums accordingly [3].
+
+## Footnotes
+
+* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0
+* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e
+* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29
+* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31
+
+## Acknowledgements
+
+Thanks to Joffrey F (shin-) and Guillaume J. Charmes (creack) for the initial
+work on the TarSum calculation.
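+
+As a closing illustration, here is a minimal usage sketch of the Go package
+this specification accompanies. It assumes the vendored import path used in
+this repository and a placeholder archive name, `layer.tar`; treat it as a
+sketch rather than a canonical example.
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+
+	"github.com/docker/docker/pkg/tarsum"
+)
+
+func main() {
+	fh, err := os.Open("layer.tar") // placeholder path
+	if err != nil {
+		panic(err)
+	}
+	defer fh.Close()
+
+	// Version0, with gzip re-compression of the pass-through stream disabled;
+	// the second argument only affects the re-emitted stream, not the sum.
+	ts, err := tarsum.NewTarSum(fh, true, tarsum.Version0)
+	if err != nil {
+		panic(err)
+	}
+
+	// File sums are populated as the TarSum is read, so drain it first.
+	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
+		panic(err)
+	}
+
+	// Prints e.g. "tarsum+sha256:<hex>". Pass extra payload bytes (such as
+	// the image JSON) instead of nil to include them in the final sum.
+	fmt.Println(ts.Sum(nil))
+}
+```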
+ diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go b/vendor/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go new file mode 100644 index 00000000..89626660 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go @@ -0,0 +1,648 @@ +package tarsum + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" +) + +type testLayer struct { + filename string + options *sizedOptions + jsonfile string + gzip bool + tarsum string + version Version + hash THash +} + +var testLayers = []testLayer{ + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + version: Version0, + tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + version: VersionDev, + tarsum: "tarsum.dev+sha256:db56e35eec6ce65ba1588c20ba6b1ea23743b59e81fb6b7f358ccbde5580345c"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + gzip: true, + tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, + { + // Tests existing version of TarSum when xattrs are present + filename: "testdata/xattr/layer.tar", + jsonfile: "testdata/xattr/json", + version: Version0, + tarsum: "tarsum+sha256:07e304a8dbcb215b37649fde1a699f8aeea47e60815707f1cdf4d55d25ff6ab4"}, + { + // Tests next version of TarSum when xattrs are present + filename: "testdata/xattr/layer.tar", + jsonfile: "testdata/xattr/json", + version: VersionDev, + tarsum: "tarsum.dev+sha256:6c58917892d77b3b357b0f9ad1e28e1f4ae4de3a8006bd3beb8beda214d8fd16"}, + { + filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar", + jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json", + tarsum: "tarsum+sha256:c66bd5ec9f87b8f4c6135ca37684618f486a3dd1d113b138d0a177bfa39c2571"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"}, + { + // this tar has two files with the same path + filename: "testdata/collision/collision-0.tar", + tarsum: "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"}, + { + // this tar has the same two files (with the same path), but reversed order. 
ensuring is has different hash than above + filename: "testdata/collision/collision-1.tar", + tarsum: "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"}, + { + // this tar has newer of collider-0.tar, ensuring is has different hash + filename: "testdata/collision/collision-2.tar", + tarsum: "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"}, + { + // this tar has newer of collider-1.tar, ensuring is has different hash + filename: "testdata/collision/collision-3.tar", + tarsum: "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+md5:0d7529ec7a8360155b48134b8e599f53", + hash: md5THash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df", + hash: sha1Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c", + hash: sha224Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636", + hash: sha384Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855", + hash: sha512Hash, + }, +} + +type sizedOptions struct { + num int64 + size int64 + isRand bool + realFile bool +} + +// make a tar: +// * num is the number of files the tar should have +// * size is the bytes per file +// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros) +// * realFile will write to a TempFile, instead of an in memory buffer +func sizedTar(opts sizedOptions) io.Reader { + var ( + fh io.ReadWriter + err error + ) + if opts.realFile { + fh, err = ioutil.TempFile("", "tarsum") + if err != nil { + return nil + } + } else { + fh = bytes.NewBuffer([]byte{}) + } + tarW := tar.NewWriter(fh) + defer tarW.Close() + for i := int64(0); i < opts.num; i++ { + err := tarW.WriteHeader(&tar.Header{ + Name: fmt.Sprintf("/testdata%d", i), + Mode: 0755, + Uid: 0, + Gid: 0, + Size: opts.size, + }) + if err != nil { + return nil + } + var rBuf []byte + if opts.isRand { + rBuf = make([]byte, 8) + _, err = rand.Read(rBuf) + if err != nil { + return nil + } + } else { + rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0} + } + + for i := int64(0); i < opts.size/int64(8); i++ { + tarW.Write(rBuf) + } + } + return fh +} + +func emptyTarSum(gzip bool) (TarSum, error) { + reader, writer := io.Pipe() + tarWriter := tar.NewWriter(writer) + + // Immediately close tarWriter and write-end of the + // Pipe in a separate goroutine so we don't block. 
+ go func() { + tarWriter.Close() + writer.Close() + }() + + return NewTarSum(reader, !gzip, Version0) +} + +// Test errors on NewTarsumForLabel +func TestNewTarSumForLabelInvalid(t *testing.T) { + reader := strings.NewReader("") + + if _, err := NewTarSumForLabel(reader, true, "invalidlabel"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } + + if _, err := NewTarSumForLabel(reader, true, "invalid+sha256"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } + if _, err := NewTarSumForLabel(reader, true, "tarsum.v1+invalid"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } +} + +func TestNewTarSumForLabel(t *testing.T) { + + layer := testLayers[0] + + reader, err := os.Open(layer.filename) + if err != nil { + t.Fatal(err) + } + label := strings.Split(layer.tarsum, ":")[0] + ts, err := NewTarSumForLabel(reader, false, label) + if err != nil { + t.Fatal(err) + } + + // Make sure it actually worked by reading a little bit of it + nbByteToRead := 8 * 1024 + dBuf := make([]byte, nbByteToRead) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) + } +} + +// TestEmptyTar tests that tarsum does not fail to read an empty tar +// and correctly returns the hex digest of an empty hash. +func TestEmptyTar(t *testing.T) { + // Test without gzip. + ts, err := emptyTarSum(false) + if err != nil { + t.Fatal(err) + } + + zeroBlock := make([]byte, 1024) + buf := new(bytes.Buffer) + + n, err := io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { + t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) + } + + expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil)) + resultSum := ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test with gzip. + ts, err = emptyTarSum(true) + if err != nil { + t.Fatal(err) + } + buf.Reset() + + n, err = io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + bufgz := new(bytes.Buffer) + gz := gzip.NewWriter(bufgz) + n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) + gz.Close() + gzBytes := bufgz.Bytes() + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) { + t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test without ever actually writing anything. 
+ if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil { + t.Fatal(err) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } +} + +var ( + md5THash = NewTHash("md5", md5.New) + sha1Hash = NewTHash("sha1", sha1.New) + sha224Hash = NewTHash("sha224", sha256.New224) + sha384Hash = NewTHash("sha384", sha512.New384) + sha512Hash = NewTHash("sha512", sha512.New) +) + +// Test all the build-in read size : buf8K, buf16K, buf32K and more +func TestTarSumsReadSize(t *testing.T) { + // Test always on the same layer (that is big enough) + layer := testLayers[0] + + for i := 0; i < 5; i++ { + + reader, err := os.Open(layer.filename) + if err != nil { + t.Fatal(err) + } + ts, err := NewTarSum(reader, false, layer.version) + if err != nil { + t.Fatal(err) + } + + // Read and discard bytes so that it populates sums + nbByteToRead := (i + 1) * 8 * 1024 + dBuf := make([]byte, nbByteToRead) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) + continue + } + } +} + +func TestTarSums(t *testing.T) { + for _, layer := range testLayers { + var ( + fh io.Reader + err error + ) + if len(layer.filename) > 0 { + fh, err = os.Open(layer.filename) + if err != nil { + t.Errorf("failed to open %s: %s", layer.filename, err) + continue + } + } else if layer.options != nil { + fh = sizedTar(*layer.options) + } else { + // What else is there to test? + t.Errorf("what to do with %#v", layer) + continue + } + if file, ok := fh.(*os.File); ok { + defer file.Close() + } + + var ts TarSum + if layer.hash == nil { + // double negatives! + ts, err = NewTarSum(fh, !layer.gzip, layer.version) + } else { + ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash) + } + if err != nil { + t.Errorf("%q :: %q", err, layer.filename) + continue + } + + // Read variable number of bytes to test dynamic buffer + dBuf := make([]byte, 1) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 1B from %s: %s", layer.filename, err) + continue + } + dBuf = make([]byte, 16*1024) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 16KB from %s: %s", layer.filename, err) + continue + } + + // Read and discard remaining bytes + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to copy from %s: %s", layer.filename, err) + continue + } + var gotSum string + if len(layer.jsonfile) > 0 { + jfh, err := os.Open(layer.jsonfile) + if err != nil { + t.Errorf("failed to open %s: %s", layer.jsonfile, err) + continue + } + buf, err := ioutil.ReadAll(jfh) + if err != nil { + t.Errorf("failed to readAll %s: %s", layer.jsonfile, err) + continue + } + gotSum = ts.Sum(buf) + } else { + gotSum = ts.Sum(nil) + } + + if layer.tarsum != gotSum { + t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum) + } + var expectedHashName string + if layer.hash != nil { + expectedHashName = layer.hash.Name() + } else { + expectedHashName = DefaultTHash.Name() + } + if expectedHashName != ts.Hash().Name() { + t.Errorf("expecting hash [%v], but got [%s]", expectedHashName, ts.Hash().Name()) + } + } +} + +func TestIteration(t *testing.T) { + headerTests := []struct { + expectedSum string // TODO(vbatts) it would be nice to get individual sums of each + version Version + hdr *tar.Header + data []byte + }{ + { + "tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd", + Version0, + &tar.Header{ + Name: "file.txt", 
+ Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465", + VersionDev, + &tar.Header{ + Name: "file.txt", + Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef", + VersionDev, + &tar.Header{ + Name: "another.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.key1": "value1", + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.KEY1": "value1", // adding different case to ensure different sum + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa", + Version0, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.NOT": "CALCULATED", + }, + }, + []byte("test"), + }, + } + for _, htest := range headerTests { + s, err := renderSumForHeader(htest.version, htest.hdr, htest.data) + if err != nil { + t.Fatal(err) + } + + if s != htest.expectedSum { + t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s) + } + } + +} + +func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { + buf := bytes.NewBuffer(nil) + // first build our test tar + tw := tar.NewWriter(buf) + if err := tw.WriteHeader(h); err != nil { + return "", err + } + if _, err := tw.Write(data); err != nil { + return "", err + } + tw.Close() + + ts, err := NewTarSum(buf, true, v) + if err != nil { + return "", err + } + tr := tar.NewReader(ts) + for { + hdr, err := tr.Next() + if hdr == nil || err == io.EOF { + // Signals the end of the archive. 
+ break + } + if err != nil { + return "", err + } + if _, err = io.Copy(ioutil.Discard, tr); err != nil { + return "", err + } + } + return ts.Sum(nil), nil +} + +func Benchmark9kTar(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + n, err := io.Copy(buf, fh) + fh.Close() + + reader := bytes.NewReader(buf.Bytes()) + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + reader.Seek(0, 0) + ts, err := NewTarSum(reader, true, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +func Benchmark9kTarGzip(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + n, err := io.Copy(buf, fh) + fh.Close() + + reader := bytes.NewReader(buf.Bytes()) + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + reader.Seek(0, 0) + ts, err := NewTarSum(reader, false, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false) +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true) +} + +func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) { + var fh *os.File + tarReader := sizedTar(opts) + if br, ok := tarReader.(*os.File); ok { + fh = br + } + defer os.Remove(fh.Name()) + defer fh.Close() + + b.SetBytes(opts.size * opts.num) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts, err := NewTarSum(fh, !isGzip, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + fh.Seek(0, 0) + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json new file mode 100644 index 00000000..48e2af34 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json @@ -0,0 +1 @@ +{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' 
/etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425} \ No newline at end of file diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar new file mode 100644 index 00000000..dfd5c204 Binary files /dev/null and b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar differ diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json new file mode 100644 index 00000000..af57be01 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json @@ -0,0 +1 @@ +{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0} \ No newline at end of file diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar new file mode 100644 index 00000000..880b3f2c Binary files /dev/null and b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar differ diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-0.tar b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-0.tar new file mode 100644 index 00000000..1c636b3b Binary files /dev/null and b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-0.tar differ diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-1.tar b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-1.tar new file mode 100644 index 00000000..b411be97 Binary files /dev/null and b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-1.tar differ diff --git 
a/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar new file mode 100644 index 00000000..7b5c04a9 Binary files /dev/null and b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar differ diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-3.tar b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-3.tar new file mode 100644 index 00000000..f8c64586 Binary files /dev/null and b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-3.tar differ diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/json b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/json new file mode 100644 index 00000000..288441a9 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/json @@ -0,0 +1 @@ +{"id":"4439c3c7f847954100b42b267e7e5529cac1d6934db082f65795c5ca2e594d93","parent":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","created":"2014-05-16T17:19:44.091534414Z","container":"5f92fb06cc58f357f0cde41394e2bbbb664e663974b2ac1693ab07b7a306749b","container_config":{"Hostname":"9565c6517a0e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","setcap 'cap_setgid,cap_setuid+ep' ./file \u0026\u0026 getcap ./file"],"Image":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.11.1-dev","config":{"Hostname":"9565c6517a0e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":0} \ No newline at end of file diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/layer.tar b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/layer.tar new file mode 100644 index 00000000..819351d4 Binary files /dev/null and b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/layer.tar differ diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/versioning.go b/vendor/src/github.com/docker/docker/pkg/tarsum/versioning.go new file mode 100644 index 00000000..28822868 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/versioning.go @@ -0,0 +1,150 @@ +package tarsum + +import ( + "archive/tar" + "errors" + "sort" + "strconv" + "strings" +) + +// Version is used for versioning of the TarSum algorithm +// based on the prefix of the hash used +// i.e. 
"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" +type Version int + +// Prefix of "tarsum" +const ( + Version0 Version = iota + Version1 + // VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation + VersionDev +) + +// VersionLabelForChecksum returns the label for the given tarsum +// checksum, i.e., everything before the first `+` character in +// the string or an empty string if no label separator is found. +func VersionLabelForChecksum(checksum string) string { + // Checksums are in the form: {versionLabel}+{hashID}:{hex} + sepIndex := strings.Index(checksum, "+") + if sepIndex < 0 { + return "" + } + return checksum[:sepIndex] +} + +// GetVersions gets a list of all known tarsum versions. +func GetVersions() []Version { + v := []Version{} + for k := range tarSumVersions { + v = append(v, k) + } + return v +} + +var ( + tarSumVersions = map[Version]string{ + Version0: "tarsum", + Version1: "tarsum.v1", + VersionDev: "tarsum.dev", + } + tarSumVersionsByName = map[string]Version{ + "tarsum": Version0, + "tarsum.v1": Version1, + "tarsum.dev": VersionDev, + } +) + +func (tsv Version) String() string { + return tarSumVersions[tsv] +} + +// GetVersionFromTarsum returns the Version from the provided string. +func GetVersionFromTarsum(tarsum string) (Version, error) { + tsv := tarsum + if strings.Contains(tarsum, "+") { + tsv = strings.SplitN(tarsum, "+", 2)[0] + } + for v, s := range tarSumVersions { + if s == tsv { + return v, nil + } + } + return -1, ErrNotVersion +} + +// Errors that may be returned by functions in this package +var ( + ErrNotVersion = errors.New("string does not include a TarSum Version") + ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") +) + +// tarHeaderSelector is the interface which different versions +// of tarsum should use for selecting and ordering tar headers +// for each item in the archive. +type tarHeaderSelector interface { + selectHeaders(h *tar.Header) (orderedHeaders [][2]string) +} + +type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) + +func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { + return f(h) +} + +func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + return [][2]string{ + {"name", h.Name}, + {"mode", strconv.FormatInt(h.Mode, 10)}, + {"uid", strconv.Itoa(h.Uid)}, + {"gid", strconv.Itoa(h.Gid)}, + {"size", strconv.FormatInt(h.Size, 10)}, + {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)}, + {"typeflag", string([]byte{h.Typeflag})}, + {"linkname", h.Linkname}, + {"uname", h.Uname}, + {"gname", h.Gname}, + {"devmajor", strconv.FormatInt(h.Devmajor, 10)}, + {"devminor", strconv.FormatInt(h.Devminor, 10)}, + } +} + +func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + // Get extended attributes. + xAttrKeys := make([]string, len(h.Xattrs)) + for k := range h.Xattrs { + xAttrKeys = append(xAttrKeys, k) + } + sort.Strings(xAttrKeys) + + // Make the slice with enough capacity to hold the 11 basic headers + // we want from the v0 selector plus however many xattrs we have. + orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) + + // Copy all headers from v0 excluding the 'mtime' header (the 5th element). + v0headers := v0TarHeaderSelect(h) + orderedHeaders = append(orderedHeaders, v0headers[0:5]...) + orderedHeaders = append(orderedHeaders, v0headers[6:]...) + + // Finally, append the sorted xattrs. 
+ for _, k := range xAttrKeys { + orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) + } + + return +} + +var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ + Version0: v0TarHeaderSelect, + Version1: v1TarHeaderSelect, + VersionDev: v1TarHeaderSelect, +} + +func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { + headerSelector, ok := registeredHeaderSelectors[v] + if !ok { + return nil, ErrVersionNotImplemented + } + + return headerSelector, nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/versioning_test.go b/vendor/src/github.com/docker/docker/pkg/tarsum/versioning_test.go new file mode 100644 index 00000000..88e0a578 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/versioning_test.go @@ -0,0 +1,98 @@ +package tarsum + +import ( + "testing" +) + +func TestVersionLabelForChecksum(t *testing.T) { + version := VersionLabelForChecksum("tarsum+sha256:deadbeef") + if version != "tarsum" { + t.Fatalf("Version should have been 'tarsum', was %v", version) + } + version = VersionLabelForChecksum("tarsum.v1+sha256:deadbeef") + if version != "tarsum.v1" { + t.Fatalf("Version should have been 'tarsum.v1', was %v", version) + } + version = VersionLabelForChecksum("something+somethingelse") + if version != "something" { + t.Fatalf("Version should have been 'something', was %v", version) + } + version = VersionLabelForChecksum("invalidChecksum") + if version != "" { + t.Fatalf("Version should have been empty, was %v", version) + } +} + +func TestVersion(t *testing.T) { + expected := "tarsum" + var v Version + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } + + expected = "tarsum.v1" + v = 1 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } + + expected = "tarsum.dev" + v = 2 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } +} + +func TestGetVersion(t *testing.T) { + testSet := []struct { + Str string + Expected Version + }{ + {"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0}, + {"tarsum+sha256", Version0}, + {"tarsum", Version0}, + {"tarsum.dev", VersionDev}, + {"tarsum.dev+sha256:deadbeef", VersionDev}, + } + + for _, ts := range testSet { + v, err := GetVersionFromTarsum(ts.Str) + if err != nil { + t.Fatalf("%q : %s", err, ts.Str) + } + if v != ts.Expected { + t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v) + } + } + + // test one that does not exist, to ensure it errors + str := "weak+md5:abcdeabcde" + _, err := GetVersionFromTarsum(str) + if err != ErrNotVersion { + t.Fatalf("%q : %s", err, str) + } +} + +func TestGetVersions(t *testing.T) { + expected := []Version{ + Version0, + Version1, + VersionDev, + } + versions := GetVersions() + if len(versions) != len(expected) { + t.Fatalf("Expected %v versions, got %v", len(expected), len(versions)) + } + if !containsVersion(versions, expected[0]) || !containsVersion(versions, expected[1]) || !containsVersion(versions, expected[2]) { + t.Fatalf("Expected [%v], got [%v]", expected, versions) + } +} + +func containsVersion(versions []Version, version Version) bool { + for _, v := range versions { + if v == version { + return true + } + } + return false +} diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/writercloser.go b/vendor/src/github.com/docker/docker/pkg/tarsum/writercloser.go new file mode 100644 index 00000000..9727ecde --- /dev/null +++ 
b/vendor/src/github.com/docker/docker/pkg/tarsum/writercloser.go @@ -0,0 +1,22 @@ +package tarsum + +import ( + "io" +) + +type writeCloseFlusher interface { + io.WriteCloser + Flush() error +} + +type nopCloseFlusher struct { + io.Writer +} + +func (n *nopCloseFlusher) Close() error { + return nil +} + +func (n *nopCloseFlusher) Flush() error { + return nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/urlutil/urlutil.go b/vendor/src/github.com/docker/docker/pkg/urlutil/urlutil.go new file mode 100644 index 00000000..f7094b1f --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/urlutil/urlutil.go @@ -0,0 +1,50 @@ +// Package urlutil provides helper functions for checking the kind of a URL. +// It supports HTTP(S) URLs, git URLs and transport URLs (tcp://, udp://, unix://). +package urlutil + +import ( + "regexp" + "strings" +) + +var ( + validPrefixes = map[string][]string{ + "url": {"http://", "https://"}, + "git": {"git://", "github.com/", "git@"}, + "transport": {"tcp://", "udp://", "unix://"}, + } + urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$") +) + +// IsURL returns true if the provided str is an HTTP(S) URL. +func IsURL(str string) bool { + return checkURL(str, "url") +} + +// IsGitURL returns true if the provided str is a git repository URL. +func IsGitURL(str string) bool { + if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) { + return true + } + return checkURL(str, "git") +} + +// IsGitTransport returns true if the provided str is a git transport by inspecting +// the prefix of the string for known protocols used in git. +func IsGitTransport(str string) bool { + return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") +} + +// IsTransportURL returns true if the provided str is a transport (tcp, udp, unix) URL.
+func IsTransportURL(str string) bool { + return checkURL(str, "transport") +} + +func checkURL(str, kind string) bool { + for _, prefix := range validPrefixes[kind] { + if strings.HasPrefix(str, prefix) { + return true + } + } + return false +} diff --git a/vendor/src/github.com/docker/docker/pkg/urlutil/urlutil_test.go b/vendor/src/github.com/docker/docker/pkg/urlutil/urlutil_test.go new file mode 100644 index 00000000..bb89d8b5 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/urlutil/urlutil_test.go @@ -0,0 +1,55 @@ +package urlutil + +import "testing" + +var ( + gitUrls = []string{ + "git://github.com/docker/docker", + "git@github.com:docker/docker.git", + "git@bitbucket.org:atlassianlabs/atlassian-docker.git", + "https://github.com/docker/docker.git", + "http://github.com/docker/docker.git", + "http://github.com/docker/docker.git#branch", + "http://github.com/docker/docker.git#:dir", + } + incompleteGitUrls = []string{ + "github.com/docker/docker", + } + invalidGitUrls = []string{ + "http://github.com/docker/docker.git:#branch", + } +) + +func TestValidGitTransport(t *testing.T) { + for _, url := range gitUrls { + if IsGitTransport(url) == false { + t.Fatalf("%q should be detected as valid Git prefix", url) + } + } + + for _, url := range incompleteGitUrls { + if IsGitTransport(url) == true { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} + +func TestIsGIT(t *testing.T) { + for _, url := range gitUrls { + if IsGitURL(url) == false { + t.Fatalf("%q should be detected as valid Git url", url) + } + } + + for _, url := range incompleteGitUrls { + if IsGitURL(url) == false { + t.Fatalf("%q should be detected as valid Git url", url) + } + } + + for _, url := range invalidGitUrls { + if IsGitURL(url) == true { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} diff --git a/vendor/src/github.com/stretchr/objx/LICENSE.md b/vendor/src/github.com/stretchr/objx/LICENSE.md new file mode 100644 index 00000000..21999458 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/LICENSE.md @@ -0,0 +1,23 @@ +objx - by Mat Ryer and Tyler Bunnell + +The MIT License (MIT) + +Copyright (c) 2014 Stretchr, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
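Before the diff moves on to the vendored objx package, a quick orientation on the urlutil helpers above: they are simple prefix predicates that a caller can combine to classify a build-context argument. The sketch below is illustrative only — the `classify` helper and the sample strings are not part of this diff:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/urlutil"
)

// classify maps an argument to a rough category using the vendored
// predicates above; the category names are invented for this sketch.
func classify(s string) string {
	switch {
	case urlutil.IsGitURL(s):
		return "git repository"
	case urlutil.IsTransportURL(s):
		return "daemon transport"
	case urlutil.IsURL(s):
		return "plain http(s) resource"
	default:
		return "local path"
	}
}

func main() {
	for _, s := range []string{
		"https://github.com/docker/docker.git",
		"git@github.com:docker/docker.git",
		"tcp://127.0.0.1:2375",
		"./some/local/dir",
	} {
		fmt.Printf("%-40s => %s\n", s, classify(s))
	}
}
```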
diff --git a/vendor/src/github.com/stretchr/objx/README.md b/vendor/src/github.com/stretchr/objx/README.md new file mode 100644 index 00000000..4aa18068 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/README.md @@ -0,0 +1,3 @@ +# objx + + * Jump into the [API Documentation](http://godoc.org/github.com/stretchr/objx) diff --git a/vendor/src/github.com/stretchr/objx/accessors.go b/vendor/src/github.com/stretchr/objx/accessors.go new file mode 100644 index 00000000..721bcac7 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/accessors.go @@ -0,0 +1,179 @@ +package objx + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// arrayAccesRegexString is the regex used to extract the array number +// from the access path +const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` + +// arrayAccesRegex is the compiled arrayAccesRegexString +var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) + +// Get gets the value using the specified selector and +// returns it inside a new Obj object. +// +// If it cannot find the value, Get will return a nil +// value inside an instance of Obj. +// +// Get can only operate directly on map[string]interface{} and []interface. +// +// Example +// +// To access the title of the third chapter of the second book, do: +// +// o.Get("books[1].chapters[2].title") +func (m Map) Get(selector string) *Value { + rawObj := access(m, selector, nil, false, false) + return &Value{data: rawObj} +} + +// Set sets the value using the specified selector and +// returns the object on which Set was called. +// +// Set can only operate directly on map[string]interface{} and []interface +// +// Example +// +// To set the title of the third chapter of the second book, do: +// +// o.Set("books[1].chapters[2].title","Time to Go") +func (m Map) Set(selector string, value interface{}) Map { + access(m, selector, value, true, false) + return m +} + +// access accesses the object using the selector and performs the +// appropriate action. +func access(current, selector, value interface{}, isSet, panics bool) interface{} { + + switch selector.(type) { + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + + if array, ok := current.([]interface{}); ok { + index := intFromInterface(selector) + + if index >= len(array) { + if panics { + panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array))) + } + return nil + } + + return array[index] + } + + return nil + + case string: + + selStr := selector.(string) + selSegs := strings.SplitN(selStr, PathSeparator, 2) + thisSel := selSegs[0] + index := -1 + var err error + + // https://github.com/stretchr/objx/issues/12 + if strings.Contains(thisSel, "[") { + + arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel) + + if len(arrayMatches) > 0 { + + // Get the key into the map + thisSel = arrayMatches[1] + + // Get the index into the array at the key + index, err = strconv.Atoi(arrayMatches[2]) + + if err != nil { + // This should never happen. If it does, something has gone + // seriously wrong. Panic. + panic("objx: Array index is not an integer. 
Must use array[int].") + } + + } + } + + if curMap, ok := current.(Map); ok { + current = map[string]interface{}(curMap) + } + + // get the object in question + switch current.(type) { + case map[string]interface{}: + curMSI := current.(map[string]interface{}) + if len(selSegs) <= 1 && isSet { + curMSI[thisSel] = value + return nil + } else { + current = curMSI[thisSel] + } + default: + current = nil + } + + if current == nil && panics { + panic(fmt.Sprintf("objx: '%v' invalid on object.", selector)) + } + + // do we need to access the item of an array? + if index > -1 { + if array, ok := current.([]interface{}); ok { + if index < len(array) { + current = array[index] + } else { + if panics { + panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array))) + } + current = nil + } + } + } + + if len(selSegs) > 1 { + current = access(current, selSegs[1], value, isSet, panics) + } + + } + + return current + +} + +// intFromInterface converts an interface object to the largest +// representation of an unsigned integer using a type switch and +// assertions +func intFromInterface(selector interface{}) int { + var value int + switch selector.(type) { + case int: + value = selector.(int) + case int8: + value = int(selector.(int8)) + case int16: + value = int(selector.(int16)) + case int32: + value = int(selector.(int32)) + case int64: + value = int(selector.(int64)) + case uint: + value = int(selector.(uint)) + case uint8: + value = int(selector.(uint8)) + case uint16: + value = int(selector.(uint16)) + case uint32: + value = int(selector.(uint32)) + case uint64: + value = int(selector.(uint64)) + default: + panic("objx: array access argument is not an integer type (this should never happen)") + } + + return value +} diff --git a/vendor/src/github.com/stretchr/objx/accessors_test.go b/vendor/src/github.com/stretchr/objx/accessors_test.go new file mode 100644 index 00000000..ce5d8e4a --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/accessors_test.go @@ -0,0 +1,145 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestAccessorsAccessGetSingleField(t *testing.T) { + + current := map[string]interface{}{"name": "Tyler"} + assert.Equal(t, "Tyler", access(current, "name", nil, false, true)) + +} +func TestAccessorsAccessGetDeep(t *testing.T) { + + current := map[string]interface{}{"name": map[string]interface{}{"first": "Tyler", "last": "Bunnell"}} + assert.Equal(t, "Tyler", access(current, "name.first", nil, false, true)) + assert.Equal(t, "Bunnell", access(current, "name.last", nil, false, true)) + +} +func TestAccessorsAccessGetDeepDeep(t *testing.T) { + + current := map[string]interface{}{"one": map[string]interface{}{"two": map[string]interface{}{"three": map[string]interface{}{"four": 4}}}} + assert.Equal(t, 4, access(current, "one.two.three.four", nil, false, true)) + +} +func TestAccessorsAccessGetInsideArray(t *testing.T) { + + current := map[string]interface{}{"names": []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}} + assert.Equal(t, "Tyler", access(current, "names[0].first", nil, false, true)) + assert.Equal(t, "Bunnell", access(current, "names[0].last", nil, false, true)) + assert.Equal(t, "Capitol", access(current, "names[1].first", nil, false, true)) + assert.Equal(t, "Bollocks", access(current, "names[1].last", nil, false, true)) + + assert.Panics(t, func() { + access(current, "names[2]", nil, false, true) + }) 
+ assert.Nil(t, access(current, "names[2]", nil, false, false)) + +} + +func TestAccessorsAccessGetFromArrayWithInt(t *testing.T) { + + current := []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}} + one := access(current, 0, nil, false, false) + two := access(current, 1, nil, false, false) + three := access(current, 2, nil, false, false) + + assert.Equal(t, "Tyler", one.(map[string]interface{})["first"]) + assert.Equal(t, "Capitol", two.(map[string]interface{})["first"]) + assert.Nil(t, three) + +} + +func TestAccessorsGet(t *testing.T) { + + current := New(map[string]interface{}{"name": "Tyler"}) + assert.Equal(t, "Tyler", current.Get("name").data) + +} + +func TestAccessorsAccessSetSingleField(t *testing.T) { + + current := map[string]interface{}{"name": "Tyler"} + access(current, "name", "Mat", true, false) + assert.Equal(t, current["name"], "Mat") + + access(current, "age", 29, true, true) + assert.Equal(t, current["age"], 29) + +} + +func TestAccessorsAccessSetSingleFieldNotExisting(t *testing.T) { + + current := map[string]interface{}{} + access(current, "name", "Mat", true, false) + assert.Equal(t, current["name"], "Mat") + +} + +func TestAccessorsAccessSetDeep(t *testing.T) { + + current := map[string]interface{}{"name": map[string]interface{}{"first": "Tyler", "last": "Bunnell"}} + + access(current, "name.first", "Mat", true, true) + access(current, "name.last", "Ryer", true, true) + + assert.Equal(t, "Mat", access(current, "name.first", nil, false, true)) + assert.Equal(t, "Ryer", access(current, "name.last", nil, false, true)) + +} +func TestAccessorsAccessSetDeepDeep(t *testing.T) { + + current := map[string]interface{}{"one": map[string]interface{}{"two": map[string]interface{}{"three": map[string]interface{}{"four": 4}}}} + + access(current, "one.two.three.four", 5, true, true) + + assert.Equal(t, 5, access(current, "one.two.three.four", nil, false, true)) + +} +func TestAccessorsAccessSetArray(t *testing.T) { + + current := map[string]interface{}{"names": []interface{}{"Tyler"}} + + access(current, "names[0]", "Mat", true, true) + + assert.Equal(t, "Mat", access(current, "names[0]", nil, false, true)) + +} +func TestAccessorsAccessSetInsideArray(t *testing.T) { + + current := map[string]interface{}{"names": []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}} + + access(current, "names[0].first", "Mat", true, true) + access(current, "names[0].last", "Ryer", true, true) + access(current, "names[1].first", "Captain", true, true) + access(current, "names[1].last", "Underpants", true, true) + + assert.Equal(t, "Mat", access(current, "names[0].first", nil, false, true)) + assert.Equal(t, "Ryer", access(current, "names[0].last", nil, false, true)) + assert.Equal(t, "Captain", access(current, "names[1].first", nil, false, true)) + assert.Equal(t, "Underpants", access(current, "names[1].last", nil, false, true)) + +} + +func TestAccessorsAccessSetFromArrayWithInt(t *testing.T) { + + current := []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}} + one := access(current, 0, nil, false, false) + two := access(current, 1, nil, false, false) + three := access(current, 2, nil, false, false) + + assert.Equal(t, "Tyler", one.(map[string]interface{})["first"]) + assert.Equal(t, "Capitol", two.(map[string]interface{})["first"]) + 
assert.Nil(t, three) + +} + +func TestAccessorsSet(t *testing.T) { + + current := New(map[string]interface{}{"name": "Tyler"}) + current.Set("name", "Mat") + assert.Equal(t, "Mat", current.Get("name").data) + +} diff --git a/vendor/src/github.com/stretchr/objx/codegen/array-access.txt b/vendor/src/github.com/stretchr/objx/codegen/array-access.txt new file mode 100644 index 00000000..30602347 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/codegen/array-access.txt @@ -0,0 +1,14 @@ + case []{1}: + a := object.([]{1}) + if isSet { + a[index] = value.({1}) + } else { + if index >= len(a) { + if panics { + panic(fmt.Sprintf("objx: Index %d is out of range because the []{1} only contains %d items.", index, len(a))) + } + return nil + } else { + return a[index] + } + } diff --git a/vendor/src/github.com/stretchr/objx/codegen/index.html b/vendor/src/github.com/stretchr/objx/codegen/index.html new file mode 100644 index 00000000..379ffc3c --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/codegen/index.html @@ -0,0 +1,86 @@ + [86-line HTML page whose markup was lost in extraction: a small in-browser "Codegen" form with a Template field ("Use {x} as a placeholder for each argument"), an Arguments field (comma separated, one block per line), an Output field, and the script that performs the substitution.]
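For readability of the generator inputs that follow: codegen/template.txt is instantiated once per row of codegen/types_list.txt, with `{N}` replaced by the N-th comma-separated field of the row — judging from the generated code further down, `{1}` is the Go type, `{2}` a sample value, `{3}` the default returned on a type miss, and `{4}` the method-name fragment. A minimal sketch of that substitution, assuming plain positional replacement as the (lost) HTML form suggests:

```go
package main

import (
	"fmt"
	"strings"
)

// render replaces {0}, {1}, ... in tmpl with the corresponding
// positional argument, mimicking what the codegen page appears to do.
func render(tmpl string, args ...string) string {
	for i, a := range args {
		tmpl = strings.Replace(tmpl, fmt.Sprintf("{%d}", i), a, -1)
	}
	return tmpl
}

func main() {
	tmpl := "// Must{4} gets the value as a {1}.\nfunc (v *Value) Must{4}() {1} { return v.data.({1}) }"
	// Fields from the "Bool,bool,true,false,Bool" row of types_list.txt:
	fmt.Println(render(tmpl, "Bool", "bool", "true", "false", "Bool"))
}
```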
+ + + + + + + + diff --git a/vendor/src/github.com/stretchr/objx/codegen/template.txt b/vendor/src/github.com/stretchr/objx/codegen/template.txt new file mode 100644 index 00000000..b396900b --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/codegen/template.txt @@ -0,0 +1,286 @@ +/* + {4} ({1} and []{1}) + -------------------------------------------------- +*/ + +// {4} gets the value as a {1}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) {4}(optionalDefault ...{1}) {1} { + if s, ok := v.data.({1}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return {3} +} + +// Must{4} gets the value as a {1}. +// +// Panics if the object is not a {1}. +func (v *Value) Must{4}() {1} { + return v.data.({1}) +} + +// {4}Slice gets the value as a []{1}, returns the optionalDefault +// value or nil if the value is not a []{1}. +func (v *Value) {4}Slice(optionalDefault ...[]{1}) []{1} { + if s, ok := v.data.([]{1}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// Must{4}Slice gets the value as a []{1}. +// +// Panics if the object is not a []{1}. +func (v *Value) Must{4}Slice() []{1} { + return v.data.([]{1}) +} + +// Is{4} gets whether the object contained is a {1} or not. +func (v *Value) Is{4}() bool { + _, ok := v.data.({1}) + return ok +} + +// Is{4}Slice gets whether the object contained is a []{1} or not. +func (v *Value) Is{4}Slice() bool { + _, ok := v.data.([]{1}) + return ok +} + +// Each{4} calls the specified callback for each object +// in the []{1}. +// +// Panics if the object is the wrong type. +func (v *Value) Each{4}(callback func(int, {1}) bool) *Value { + + for index, val := range v.Must{4}Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// Where{4} uses the specified decider function to select items +// from the []{1}. The object contained in the result will contain +// only the selected items. +func (v *Value) Where{4}(decider func(int, {1}) bool) *Value { + + var selected []{1} + + v.Each{4}(func(index int, val {1}) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data:selected} + +} + +// Group{4} uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]{1}. +func (v *Value) Group{4}(grouper func(int, {1}) string) *Value { + + groups := make(map[string][]{1}) + + v.Each{4}(func(index int, val {1}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]{1}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data:groups} + +} + +// Replace{4} uses the specified function to replace each {1}s +// by iterating each item. The data in the returned result will be a +// []{1} containing the replaced items. +func (v *Value) Replace{4}(replacer func(int, {1}) {1}) *Value { + + arr := v.Must{4}Slice() + replaced := make([]{1}, len(arr)) + + v.Each{4}(func(index int, val {1}) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data:replaced} + +} + +// Collect{4} uses the specified collector function to collect a value +// for each of the {1}s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) Collect{4}(collector func(int, {1}) interface{}) *Value { + + arr := v.Must{4}Slice() + collected := make([]interface{}, len(arr)) + + v.Each{4}(func(index int, val {1}) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data:collected} +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func Test{4}(t *testing.T) { + + val := {1}( {2} ) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").{4}()) + assert.Equal(t, val, New(m).Get("value").Must{4}()) + assert.Equal(t, {1}({3}), New(m).Get("nothing").{4}()) + assert.Equal(t, val, New(m).Get("nothing").{4}({2})) + + assert.Panics(t, func() { + New(m).Get("age").Must{4}() + }) + +} + +func Test{4}Slice(t *testing.T) { + + val := {1}( {2} ) + m := map[string]interface{}{"value": []{1}{ val }, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").{4}Slice()[0]) + assert.Equal(t, val, New(m).Get("value").Must{4}Slice()[0]) + assert.Equal(t, []{1}(nil), New(m).Get("nothing").{4}Slice()) + assert.Equal(t, val, New(m).Get("nothing").{4}Slice( []{1}{ {1}({2}) } )[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").Must{4}Slice() + }) + +} + +func TestIs{4}(t *testing.T) { + + var v *Value + + v = &Value{data: {1}({2})} + assert.True(t, v.Is{4}()) + + v = &Value{data: []{1}{ {1}({2}) }} + assert.True(t, v.Is{4}Slice()) + +} + +func TestEach{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + count := 0 + replacedVals := make([]{1}, 0) + assert.Equal(t, v, v.Each{4}(func(i int, val {1}) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.Must{4}Slice()[0]) + assert.Equal(t, replacedVals[1], v.Must{4}Slice()[1]) + assert.Equal(t, replacedVals[2], v.Must{4}Slice()[2]) + +} + +func TestWhere{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + selected := v.Where{4}(func(i int, val {1}) bool { + return i%2==0 + }).Must{4}Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroup{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + grouped := v.Group{4}(func(i int, val {1}) string { + return fmt.Sprintf("%v", i%2==0) + }).data.(map[string][]{1}) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplace{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + rawArr := v.Must{4}Slice() + + replaced := v.Replace{4}(func(index int, val {1}) {1} { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.Must{4}Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollect{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + collected := v.Collect{4}(func(index int, val {1}) interface{} 
{ + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} diff --git a/vendor/src/github.com/stretchr/objx/codegen/types_list.txt b/vendor/src/github.com/stretchr/objx/codegen/types_list.txt new file mode 100644 index 00000000..069d43d8 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/codegen/types_list.txt @@ -0,0 +1,20 @@ +Interface,interface{},"something",nil,Inter +Map,map[string]interface{},map[string]interface{}{"name":"Tyler"},nil,MSI +ObjxMap,(Map),New(1),New(nil),ObjxMap +Bool,bool,true,false,Bool +String,string,"hello","",Str +Int,int,1,0,Int +Int8,int8,1,0,Int8 +Int16,int16,1,0,Int16 +Int32,int32,1,0,Int32 +Int64,int64,1,0,Int64 +Uint,uint,1,0,Uint +Uint8,uint8,1,0,Uint8 +Uint16,uint16,1,0,Uint16 +Uint32,uint32,1,0,Uint32 +Uint64,uint64,1,0,Uint64 +Uintptr,uintptr,1,0,Uintptr +Float32,float32,1,0,Float32 +Float64,float64,1,0,Float64 +Complex64,complex64,1,0,Complex64 +Complex128,complex128,1,0,Complex128 diff --git a/vendor/src/github.com/stretchr/objx/constants.go b/vendor/src/github.com/stretchr/objx/constants.go new file mode 100644 index 00000000..f9eb42a2 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/constants.go @@ -0,0 +1,13 @@ +package objx + +const ( + // PathSeparator is the character used to separate the elements + // of the keypath. + // + // For example, `location.address.city` + PathSeparator string = "." + + // SignatureSeparator is the character that is used to + // separate the Base64 string from the security signature. + SignatureSeparator = "_" +) diff --git a/vendor/src/github.com/stretchr/objx/conversions.go b/vendor/src/github.com/stretchr/objx/conversions.go new file mode 100644 index 00000000..9cdfa9f9 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/conversions.go @@ -0,0 +1,117 @@ +package objx + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/url" +) + +// JSON converts the contained object to a JSON string +// representation +func (m Map) JSON() (string, error) { + + result, err := json.Marshal(m) + + if err != nil { + err = errors.New("objx: JSON encode failed with: " + err.Error()) + } + + return string(result), err + +} + +// MustJSON converts the contained object to a JSON string +// representation and panics if there is an error +func (m Map) MustJSON() string { + result, err := m.JSON() + if err != nil { + panic(err.Error()) + } + return result +} + +// Base64 converts the contained object to a Base64 string +// representation of the JSON string representation +func (m Map) Base64() (string, error) { + + var buf bytes.Buffer + + jsonData, err := m.JSON() + if err != nil { + return "", err + } + + encoder := base64.NewEncoder(base64.StdEncoding, &buf) + encoder.Write([]byte(jsonData)) + encoder.Close() + + return buf.String(), nil + +} + +// MustBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and panics +// if there is an error +func (m Map) MustBase64() string { + result, err := m.Base64() + if err != nil { + panic(err.Error()) + } + return result +} + +// SignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key. 
+func (m Map) SignedBase64(key string) (string, error) { + + base64, err := m.Base64() + if err != nil { + return "", err + } + + sig := HashWithKey(base64, key) + + return base64 + SignatureSeparator + sig, nil + +} + +// MustSignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key and panics if there is an error +func (m Map) MustSignedBase64(key string) string { + result, err := m.SignedBase64(key) + if err != nil { + panic(err.Error()) + } + return result +} + +/* + URL Query + ------------------------------------------------ +*/ + +// URLValues creates a url.Values object from an Obj. This +// function requires that the wrapped object be a map[string]interface{} +func (m Map) URLValues() url.Values { + + vals := make(url.Values) + + for k, v := range m { + //TODO: can this be done without sprintf? + vals.Set(k, fmt.Sprintf("%v", v)) + } + + return vals +} + +// URLQuery gets an encoded URL query representing the given +// Obj. This function requires that the wrapped object be a +// map[string]interface{} +func (m Map) URLQuery() (string, error) { + return m.URLValues().Encode(), nil +} diff --git a/vendor/src/github.com/stretchr/objx/conversions_test.go b/vendor/src/github.com/stretchr/objx/conversions_test.go new file mode 100644 index 00000000..e9ccd298 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/conversions_test.go @@ -0,0 +1,94 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestConversionJSON(t *testing.T) { + + jsonString := `{"name":"Mat"}` + o := MustFromJSON(jsonString) + + result, err := o.JSON() + + if assert.NoError(t, err) { + assert.Equal(t, jsonString, result) + } + + assert.Equal(t, jsonString, o.MustJSON()) + +} + +func TestConversionJSONWithError(t *testing.T) { + + o := MSI() + o["test"] = func() {} + + assert.Panics(t, func() { + o.MustJSON() + }) + + _, err := o.JSON() + + assert.Error(t, err) + +} + +func TestConversionBase64(t *testing.T) { + + o := New(map[string]interface{}{"name": "Mat"}) + + result, err := o.Base64() + + if assert.NoError(t, err) { + assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", result) + } + + assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", o.MustBase64()) + +} + +func TestConversionBase64WithError(t *testing.T) { + + o := MSI() + o["test"] = func() {} + + assert.Panics(t, func() { + o.MustBase64() + }) + + _, err := o.Base64() + + assert.Error(t, err) + +} + +func TestConversionSignedBase64(t *testing.T) { + + o := New(map[string]interface{}{"name": "Mat"}) + + result, err := o.SignedBase64("key") + + if assert.NoError(t, err) { + assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", result) + } + + assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", o.MustSignedBase64("key")) + +} + +func TestConversionSignedBase64WithError(t *testing.T) { + + o := MSI() + o["test"] = func() {} + + assert.Panics(t, func() { + o.MustSignedBase64("key") + }) + + _, err := o.SignedBase64("key") + + assert.Error(t, err) + +} diff --git a/vendor/src/github.com/stretchr/objx/doc.go b/vendor/src/github.com/stretchr/objx/doc.go new file mode 100644 index 00000000..47bf85e4 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/doc.go @@ -0,0 +1,72 @@ +// objx - Go package for dealing with maps, slices, JSON and other data. 
+// +// Overview +// +// Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes +// a powerful `Get` method (among others) that allows you to easily and quickly get +// access to data within the map, without having to worry too much about type assertions, +// missing data, default values etc. +// +// Pattern +// +// Objx uses a predictable pattern to make accessing data from within a `map[string]interface{}` +// easy. +// +// Call one of the `objx.` functions to create your `objx.Map` to get going: +// +// m, err := objx.FromJSON(json) +// +// NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong; +// the rest will be optimistic and try to figure things out without panicking. +// +// Use `Get` to access the value you're interested in. You can use dot and array +// notation too: +// +// m.Get("places[0].latlng") +// +// Once you have sought the `Value` you're interested in, you can use the `Is*` methods +// to determine its type. +// +// if m.Get("code").IsStr() { /* ... */ } +// +// Or you can just assume the type, and use one of the strong type methods to +// extract the real value: +// +// m.Get("code").Int() +// +// If there's no value there (or if it's the wrong type) then a default value +// will be returned, or you can be explicit about the default value. +// +// m.Get("code").Int(-1) +// +// If you're dealing with a slice of data as a value, Objx provides many useful +// methods for iterating, manipulating and selecting that data. You can find out more +// by exploring the index below. +// +// Reading data +// +// A simple example of how to use Objx: +// +// // use MustFromJSON to make an objx.Map from some JSON +// m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) +// +// // get the details +// name := m.Get("name").Str() +// age := m.Get("age").Int() +// +// // get their nickname (or use their name if they +// // don't have one) +// nickname := m.Get("nickname").Str(name) +// +// Ranging +// +// Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For +// example, to `range` the data, do what you would expect: +// +// m := objx.MustFromJSON(json) +// for key, value := range m { +// +// /* ... do your magic ...
*/ +// +// } +package objx diff --git a/vendor/src/github.com/stretchr/objx/fixture_test.go b/vendor/src/github.com/stretchr/objx/fixture_test.go new file mode 100644 index 00000000..27f7d904 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/fixture_test.go @@ -0,0 +1,98 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +var fixtures = []struct { + // name is the name of the fixture (used for reporting + // failures) + name string + // data is the JSON data to be worked on + data string + // get is the argument(s) to pass to Get + get interface{} + // output is the expected output + output interface{} +}{ + { + name: "Simple get", + data: `{"name": "Mat"}`, + get: "name", + output: "Mat", + }, + { + name: "Get with dot notation", + data: `{"address": {"city": "Boulder"}}`, + get: "address.city", + output: "Boulder", + }, + { + name: "Deep get with dot notation", + data: `{"one": {"two": {"three": {"four": "hello"}}}}`, + get: "one.two.three.four", + output: "hello", + }, + { + name: "Get missing with dot notation", + data: `{"one": {"two": {"three": {"four": "hello"}}}}`, + get: "one.ten", + output: nil, + }, + { + name: "Get with array notation", + data: `{"tags": ["one", "two", "three"]}`, + get: "tags[1]", + output: "two", + }, + { + name: "Get with array and dot notation", + data: `{"types": { "tags": ["one", "two", "three"]}}`, + get: "types.tags[1]", + output: "two", + }, + { + name: "Get with array and dot notation - field after array", + data: `{"tags": [{"name":"one"}, {"name":"two"}, {"name":"three"}]}`, + get: "tags[1].name", + output: "two", + }, + { + name: "Complex get with array and dot notation", + data: `{"tags": [{"list": [{"one":"pizza"}]}]}`, + get: "tags[0].list[0].one", + output: "pizza", + }, + { + name: "Get field from within string should be nil", + data: `{"name":"Tyler"}`, + get: "name.something", + output: nil, + }, + { + name: "Get field from within string (using array accessor) should be nil", + data: `{"numbers":["one", "two", "three"]}`, + get: "numbers[0].nope", + output: nil, + }, +} + +func TestFixtures(t *testing.T) { + + for _, fixture := range fixtures { + + m := MustFromJSON(fixture.data) + + // get the value + t.Logf("Running get fixture: \"%s\" (%v)", fixture.name, fixture) + value := m.Get(fixture.get.(string)) + + // make sure it matches + assert.Equal(t, fixture.output, value.data, + "Get fixture \"%s\" failed: %v", fixture.name, fixture, + ) + + } + +} diff --git a/vendor/src/github.com/stretchr/objx/map.go b/vendor/src/github.com/stretchr/objx/map.go new file mode 100644 index 00000000..eb6ed8e2 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/map.go @@ -0,0 +1,222 @@ +package objx + +import ( + "encoding/base64" + "encoding/json" + "errors" + "io/ioutil" + "net/url" + "strings" +) + +// MSIConvertable is an interface that defines methods for converting your +// custom types to a map[string]interface{} representation. +type MSIConvertable interface { + // MSI gets a map[string]interface{} (msi) representing the + // object. + MSI() map[string]interface{} +} + +// Map provides extended functionality for working with +// untyped data, in particular map[string]interface (msi). +type Map map[string]interface{} + +// Value returns the internal value instance +func (m Map) Value() *Value { + return &Value{data: m} +} + +// Nil represents a nil Map. +var Nil Map = New(nil) + +// New creates a new Map containing the map[string]interface{} in the data argument. 
+// If the data argument is not a map[string]interface{}, New attempts to call the +// MSI() method on the MSIConvertable interface to create one. +func New(data interface{}) Map { + if _, ok := data.(map[string]interface{}); !ok { + if converter, ok := data.(MSIConvertable); ok { + data = converter.MSI() + } else { + return nil + } + } + return Map(data.(map[string]interface{})) +} + +// MSI creates a map[string]interface{} and puts it inside a new Map. +// +// The arguments follow a key, value pattern. +// +// Panics +// +// Panics if any key argument is not a string or if there is an odd number of arguments. +// +// Example +// +// To easily create Maps: +// +// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) +// +// // creates a Map equivalent to +// m := objx.New(map[string]interface{}{"name": "Mat", "age": 29, "subobj": map[string]interface{}{"active": true}}) +func MSI(keyAndValuePairs ...interface{}) Map { + + newMap := make(map[string]interface{}) + keyAndValuePairsLen := len(keyAndValuePairs) + + if keyAndValuePairsLen%2 != 0 { + panic("objx: MSI must have an even number of arguments following the 'key, value' pattern.") + } + + for i := 0; i < keyAndValuePairsLen; i = i + 2 { + + key := keyAndValuePairs[i] + value := keyAndValuePairs[i+1] + + // make sure the key is a string + keyString, keyStringOK := key.(string) + if !keyStringOK { + panic("objx: MSI must follow 'string, interface{}' pattern. " + keyString + " is not a valid key.") + } + + newMap[keyString] = value + + } + + return New(newMap) +} + +// ****** Conversion Constructors + +// MustFromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Panics if the JSON is invalid. +func MustFromJSON(jsonString string) Map { + o, err := FromJSON(jsonString) + + if err != nil { + panic("objx: MustFromJSON failed with error: " + err.Error()) + } + + return o +} + +// FromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Returns an error if the JSON is invalid. +func FromJSON(jsonString string) (Map, error) { + + var data interface{} + err := json.Unmarshal([]byte(jsonString), &data) + + if err != nil { + return Nil, err + } + + return New(data), nil + +} + +// FromBase64 creates a new Obj containing the data specified +// in the Base64 string. +// +// The string is an encoded JSON string returned by Base64 +func FromBase64(base64String string) (Map, error) { + + decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String)) + + decoded, err := ioutil.ReadAll(decoder) + if err != nil { + return nil, err + } + + return FromJSON(string(decoded)) +} + +// MustFromBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromBase64(base64String string) Map { + + result, err := FromBase64(base64String) + + if err != nil { + panic("objx: MustFromBase64 failed with error: " + err.Error()) + } + + return result +} + +// FromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string.
+// +// The string is an encoded JSON string returned by SignedBase64 +func FromSignedBase64(base64String, key string) (Map, error) { + parts := strings.Split(base64String, SignatureSeparator) + if len(parts) != 2 { + return nil, errors.New("objx: Signed base64 string is malformed.") + } + + sig := HashWithKey(parts[0], key) + if parts[1] != sig { + return nil, errors.New("objx: Signature for base64 data does not match.") + } + + return FromBase64(parts[0]) +} + +// MustFromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromSignedBase64(base64String, key string) Map { + + result, err := FromSignedBase64(base64String, key) + + if err != nil { + panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) + } + + return result +} + +// FromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +func FromURLQuery(query string) (Map, error) { + + vals, err := url.ParseQuery(query) + + if err != nil { + return nil, err + } + + m := make(map[string]interface{}) + for k, vals := range vals { + m[k] = vals[0] + } + + return New(m), nil +} + +// MustFromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +// +// Panics if it encounters an error +func MustFromURLQuery(query string) Map { + + o, err := FromURLQuery(query) + + if err != nil { + panic("objx: MustFromURLQuery failed with error: " + err.Error()) + } + + return o + +} diff --git a/vendor/src/github.com/stretchr/objx/map_for_test.go b/vendor/src/github.com/stretchr/objx/map_for_test.go new file mode 100644 index 00000000..6beb5067 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/map_for_test.go @@ -0,0 +1,10 @@ +package objx + +var TestMap map[string]interface{} = map[string]interface{}{ + "name": "Tyler", + "address": map[string]interface{}{ + "city": "Salt Lake City", + "state": "UT", + }, + "numbers": []interface{}{"one", "two", "three", "four", "five"}, +} diff --git a/vendor/src/github.com/stretchr/objx/map_test.go b/vendor/src/github.com/stretchr/objx/map_test.go new file mode 100644 index 00000000..1f8b45c6 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/map_test.go @@ -0,0 +1,147 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +type Convertable struct { + name string +} + +func (c *Convertable) MSI() map[string]interface{} { + return map[string]interface{}{"name": c.name} +} + +type Unconvertable struct { + name string +} + +func TestMapCreation(t *testing.T) { + + o := New(nil) + assert.Nil(t, o) + + o = New("Tyler") + assert.Nil(t, o) + + unconvertable := &Unconvertable{name: "Tyler"} + o = New(unconvertable) + assert.Nil(t, o) + + convertable := &Convertable{name: "Tyler"} + o = New(convertable) + if assert.NotNil(t, convertable) { + assert.Equal(t, "Tyler", o["name"], "Tyler") + } + + o = MSI() + if assert.NotNil(t, o) { + assert.NotNil(t, o) + } + + o = MSI("name", "Tyler") + if assert.NotNil(t, o) { + if assert.NotNil(t, o) { + assert.Equal(t, o["name"], "Tyler") + } + } + +} + +func TestMapMustFromJSONWithError(t *testing.T) { + + _, err := FromJSON(`"name":"Mat"}`) + assert.Error(t, err) + +} + +func TestMapFromJSON(t *testing.T) { + + o := MustFromJSON(`{"name":"Mat"}`) + + if assert.NotNil(t, o) { + if assert.NotNil(t, o) { + assert.Equal(t, "Mat", 
o["name"]) + } + } + +} + +func TestMapFromJSONWithError(t *testing.T) { + + var m Map + + assert.Panics(t, func() { + m = MustFromJSON(`"name":"Mat"}`) + }) + + assert.Nil(t, m) + +} + +func TestMapFromBase64String(t *testing.T) { + + base64String := "eyJuYW1lIjoiTWF0In0=" + + o, err := FromBase64(base64String) + + if assert.NoError(t, err) { + assert.Equal(t, o.Get("name").Str(), "Mat") + } + + assert.Equal(t, MustFromBase64(base64String).Get("name").Str(), "Mat") + +} + +func TestMapFromBase64StringWithError(t *testing.T) { + + base64String := "eyJuYW1lIjoiTWFasd0In0=" + + _, err := FromBase64(base64String) + + assert.Error(t, err) + + assert.Panics(t, func() { + MustFromBase64(base64String) + }) + +} + +func TestMapFromSignedBase64String(t *testing.T) { + + base64String := "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6" + + o, err := FromSignedBase64(base64String, "key") + + if assert.NoError(t, err) { + assert.Equal(t, o.Get("name").Str(), "Mat") + } + + assert.Equal(t, MustFromSignedBase64(base64String, "key").Get("name").Str(), "Mat") + +} + +func TestMapFromSignedBase64StringWithError(t *testing.T) { + + base64String := "eyJuYW1lasdIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6" + + _, err := FromSignedBase64(base64String, "key") + + assert.Error(t, err) + + assert.Panics(t, func() { + MustFromSignedBase64(base64String, "key") + }) + +} + +func TestMapFromURLQuery(t *testing.T) { + + m, err := FromURLQuery("name=tyler&state=UT") + if assert.NoError(t, err) && assert.NotNil(t, m) { + assert.Equal(t, "tyler", m.Get("name").Str()) + assert.Equal(t, "UT", m.Get("state").Str()) + } + +} diff --git a/vendor/src/github.com/stretchr/objx/mutations.go b/vendor/src/github.com/stretchr/objx/mutations.go new file mode 100644 index 00000000..b35c8639 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/mutations.go @@ -0,0 +1,81 @@ +package objx + +// Exclude returns a new Map with the keys in the specified []string +// excluded. +func (d Map) Exclude(exclude []string) Map { + + excluded := make(Map) + for k, v := range d { + var shouldInclude bool = true + for _, toExclude := range exclude { + if k == toExclude { + shouldInclude = false + break + } + } + if shouldInclude { + excluded[k] = v + } + } + + return excluded +} + +// Copy creates a shallow copy of the Obj. +func (m Map) Copy() Map { + copied := make(map[string]interface{}) + for k, v := range m { + copied[k] = v + } + return New(copied) +} + +// Merge blends the specified map with a copy of this map and returns the result. +// +// Keys that appear in both will be selected from the specified map. +// This method requires that the wrapped object be a map[string]interface{} +func (m Map) Merge(merge Map) Map { + return m.Copy().MergeHere(merge) +} + +// Merge blends the specified map with this map and returns the current map. +// +// Keys that appear in both will be selected from the specified map. The original map +// will be modified. This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) MergeHere(merge Map) Map { + + for k, v := range merge { + m[k] = v + } + + return m + +} + +// Transform builds a new Obj giving the transformer a chance +// to change the keys and values as it goes. 
This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map { + newMap := make(map[string]interface{}) + for k, v := range m { + modifiedKey, modifiedVal := transformer(k, v) + newMap[modifiedKey] = modifiedVal + } + return New(newMap) +} + +// TransformKeys builds a new map using the specified key mapping. +// +// Unspecified keys will be unaltered. +// This method requires that the wrapped object be a map[string]interface{} +func (m Map) TransformKeys(mapping map[string]string) Map { + return m.Transform(func(key string, value interface{}) (string, interface{}) { + + if newKey, ok := mapping[key]; ok { + return newKey, value + } + + return key, value + }) +} diff --git a/vendor/src/github.com/stretchr/objx/mutations_test.go b/vendor/src/github.com/stretchr/objx/mutations_test.go new file mode 100644 index 00000000..e20ee23b --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/mutations_test.go @@ -0,0 +1,77 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestExclude(t *testing.T) { + + d := make(Map) + d["name"] = "Mat" + d["age"] = 29 + d["secret"] = "ABC" + + excluded := d.Exclude([]string{"secret"}) + + assert.Equal(t, d["name"], excluded["name"]) + assert.Equal(t, d["age"], excluded["age"]) + assert.False(t, excluded.Has("secret"), "secret should be excluded") + +} + +func TestCopy(t *testing.T) { + + d1 := make(map[string]interface{}) + d1["name"] = "Tyler" + d1["location"] = "UT" + + d1Obj := New(d1) + d2Obj := d1Obj.Copy() + + d2Obj["name"] = "Mat" + + assert.Equal(t, d1Obj.Get("name").Str(), "Tyler") + assert.Equal(t, d2Obj.Get("name").Str(), "Mat") + +} + +func TestMerge(t *testing.T) { + + d := make(map[string]interface{}) + d["name"] = "Mat" + + d1 := make(map[string]interface{}) + d1["name"] = "Tyler" + d1["location"] = "UT" + + dObj := New(d) + d1Obj := New(d1) + + merged := dObj.Merge(d1Obj) + + assert.Equal(t, merged.Get("name").Str(), d1Obj.Get("name").Str()) + assert.Equal(t, merged.Get("location").Str(), d1Obj.Get("location").Str()) + assert.Empty(t, dObj.Get("location").Str()) + +} + +func TestMergeHere(t *testing.T) { + + d := make(map[string]interface{}) + d["name"] = "Mat" + + d1 := make(map[string]interface{}) + d1["name"] = "Tyler" + d1["location"] = "UT" + + dObj := New(d) + d1Obj := New(d1) + + merged := dObj.MergeHere(d1Obj) + + assert.Equal(t, dObj, merged, "With MergeHere, it should return the first modified map") + assert.Equal(t, merged.Get("name").Str(), d1Obj.Get("name").Str()) + assert.Equal(t, merged.Get("location").Str(), d1Obj.Get("location").Str()) + assert.Equal(t, merged.Get("location").Str(), dObj.Get("location").Str()) +} diff --git a/vendor/src/github.com/stretchr/objx/security.go b/vendor/src/github.com/stretchr/objx/security.go new file mode 100644 index 00000000..fdd6be9c --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/security.go @@ -0,0 +1,14 @@ +package objx + +import ( + "crypto/sha1" + "encoding/hex" +) + +// HashWithKey hashes the specified string using the security +// key. 
+func HashWithKey(data, key string) string { + hash := sha1.New() + hash.Write([]byte(data + ":" + key)) + return hex.EncodeToString(hash.Sum(nil)) +} diff --git a/vendor/src/github.com/stretchr/objx/security_test.go b/vendor/src/github.com/stretchr/objx/security_test.go new file mode 100644 index 00000000..8f0898f6 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/security_test.go @@ -0,0 +1,12 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestHashWithKey(t *testing.T) { + + assert.Equal(t, "0ce84d8d01f2c7b6e0882b784429c54d280ea2d9", HashWithKey("abc", "def")) + +} diff --git a/vendor/src/github.com/stretchr/objx/simple_example_test.go b/vendor/src/github.com/stretchr/objx/simple_example_test.go new file mode 100644 index 00000000..5408c7fd --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/simple_example_test.go @@ -0,0 +1,41 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSimpleExample(t *testing.T) { + + // build a map from a JSON object + o := MustFromJSON(`{"name":"Mat","foods":["indian","chinese"], "location":{"county":"hobbiton","city":"the shire"}}`) + + // Map can be used as a straight map[string]interface{} + assert.Equal(t, o["name"], "Mat") + + // Get an Value object + v := o.Get("name") + assert.Equal(t, v, &Value{data: "Mat"}) + + // Test the contained value + assert.False(t, v.IsInt()) + assert.False(t, v.IsBool()) + assert.True(t, v.IsStr()) + + // Get the contained value + assert.Equal(t, v.Str(), "Mat") + + // Get a default value if the contained value is not of the expected type or does not exist + assert.Equal(t, 1, v.Int(1)) + + // Get a value by using array notation + assert.Equal(t, "indian", o.Get("foods[0]").Data()) + + // Set a value by using array notation + o.Set("foods[0]", "italian") + assert.Equal(t, "italian", o.Get("foods[0]").Str()) + + // Get a value by using dot notation + assert.Equal(t, "hobbiton", o.Get("location.county").Str()) + +} diff --git a/vendor/src/github.com/stretchr/objx/tests.go b/vendor/src/github.com/stretchr/objx/tests.go new file mode 100644 index 00000000..d9e0b479 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/tests.go @@ -0,0 +1,17 @@ +package objx + +// Has gets whether there is something at the specified selector +// or not. +// +// If m is nil, Has will always return false. +func (m Map) Has(selector string) bool { + if m == nil { + return false + } + return !m.Get(selector).IsNil() +} + +// IsNil gets whether the data is nil or not. 
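+// A nil *Value receiver is also reported as nil, so the check itself never panics.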
+func (v *Value) IsNil() bool { + return v == nil || v.data == nil +} diff --git a/vendor/src/github.com/stretchr/objx/tests_test.go b/vendor/src/github.com/stretchr/objx/tests_test.go new file mode 100644 index 00000000..bcc1eb03 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/tests_test.go @@ -0,0 +1,24 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestHas(t *testing.T) { + + m := New(TestMap) + + assert.True(t, m.Has("name")) + assert.True(t, m.Has("address.state")) + assert.True(t, m.Has("numbers[4]")) + + assert.False(t, m.Has("address.state.nope")) + assert.False(t, m.Has("address.nope")) + assert.False(t, m.Has("nope")) + assert.False(t, m.Has("numbers[5]")) + + m = nil + assert.False(t, m.Has("nothing")) + +} diff --git a/vendor/src/github.com/stretchr/objx/type_specific_codegen.go b/vendor/src/github.com/stretchr/objx/type_specific_codegen.go new file mode 100644 index 00000000..f3ecb29b --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/type_specific_codegen.go @@ -0,0 +1,2881 @@ +package objx + +/* + Inter (interface{} and []interface{}) + -------------------------------------------------- +*/ + +// Inter gets the value as a interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Inter(optionalDefault ...interface{}) interface{} { + if s, ok := v.data.(interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInter gets the value as a interface{}. +// +// Panics if the object is not a interface{}. +func (v *Value) MustInter() interface{} { + return v.data.(interface{}) +} + +// InterSlice gets the value as a []interface{}, returns the optionalDefault +// value or nil if the value is not a []interface{}. +func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} { + if s, ok := v.data.([]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInterSlice gets the value as a []interface{}. +// +// Panics if the object is not a []interface{}. +func (v *Value) MustInterSlice() []interface{} { + return v.data.([]interface{}) +} + +// IsInter gets whether the object contained is a interface{} or not. +func (v *Value) IsInter() bool { + _, ok := v.data.(interface{}) + return ok +} + +// IsInterSlice gets whether the object contained is a []interface{} or not. +func (v *Value) IsInterSlice() bool { + _, ok := v.data.([]interface{}) + return ok +} + +// EachInter calls the specified callback for each object +// in the []interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { + + for index, val := range v.MustInterSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInter uses the specified decider function to select items +// from the []interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { + + var selected []interface{} + + v.EachInter(func(index int, val interface{}) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInter uses the specified grouper function to group the items +// keyed by the return of the grouper. 
The object contained in the +// result will contain a map[string][]interface{}. +func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { + + groups := make(map[string][]interface{}) + + v.EachInter(func(index int, val interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInter uses the specified function to replace each interface{}s +// by iterating each item. The data in the returned result will be a +// []interface{} containing the replaced items. +func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { + + arr := v.MustInterSlice() + replaced := make([]interface{}, len(arr)) + + v.EachInter(func(index int, val interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInter uses the specified collector function to collect a value +// for each of the interface{}s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { + + arr := v.MustInterSlice() + collected := make([]interface{}, len(arr)) + + v.EachInter(func(index int, val interface{}) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + MSI (map[string]interface{} and []map[string]interface{}) + -------------------------------------------------- +*/ + +// MSI gets the value as a map[string]interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSI gets the value as a map[string]interface{}. +// +// Panics if the object is not a map[string]interface{}. +func (v *Value) MustMSI() map[string]interface{} { + return v.data.(map[string]interface{}) +} + +// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault +// value or nil if the value is not a []map[string]interface{}. +func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { + if s, ok := v.data.([]map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSISlice gets the value as a []map[string]interface{}. +// +// Panics if the object is not a []map[string]interface{}. +func (v *Value) MustMSISlice() []map[string]interface{} { + return v.data.([]map[string]interface{}) +} + +// IsMSI gets whether the object contained is a map[string]interface{} or not. +func (v *Value) IsMSI() bool { + _, ok := v.data.(map[string]interface{}) + return ok +} + +// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. +func (v *Value) IsMSISlice() bool { + _, ok := v.data.([]map[string]interface{}) + return ok +} + +// EachMSI calls the specified callback for each object +// in the []map[string]interface{}. +// +// Panics if the object is the wrong type. 
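+// Returning false from the callback stops the iteration early.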
+func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { + + for index, val := range v.MustMSISlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereMSI uses the specified decider function to select items +// from the []map[string]interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { + + var selected []map[string]interface{} + + v.EachMSI(func(index int, val map[string]interface{}) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupMSI uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]map[string]interface{}. +func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { + + groups := make(map[string][]map[string]interface{}) + + v.EachMSI(func(index int, val map[string]interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]map[string]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceMSI uses the specified function to replace each map[string]interface{}s +// by iterating each item. The data in the returned result will be a +// []map[string]interface{} containing the replaced items. +func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { + + arr := v.MustMSISlice() + replaced := make([]map[string]interface{}, len(arr)) + + v.EachMSI(func(index int, val map[string]interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectMSI uses the specified collector function to collect a value +// for each of the map[string]interface{}s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { + + arr := v.MustMSISlice() + collected := make([]interface{}, len(arr)) + + v.EachMSI(func(index int, val map[string]interface{}) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + ObjxMap ((Map) and [](Map)) + -------------------------------------------------- +*/ + +// ObjxMap gets the value as a (Map), returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { + if s, ok := v.data.((Map)); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return New(nil) +} + +// MustObjxMap gets the value as a (Map). +// +// Panics if the object is not a (Map). +func (v *Value) MustObjxMap() Map { + return v.data.((Map)) +} + +// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault +// value or nil if the value is not a [](Map). +func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { + if s, ok := v.data.([](Map)); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustObjxMapSlice gets the value as a [](Map). +// +// Panics if the object is not a [](Map). 
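One family deviates from that pattern: `ObjxMap`'s fallback is `New(nil)` rather than a zero value, so a missing or mistyped branch still yields a `Map` that can be queried. A sketch under the same assumptions; note the nested value has to be an `objx.Map` for the `v.data.((Map))` assertion to match, since a plain `map[string]interface{}` has a different dynamic type:

```go
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	conf := objx.New(map[string]interface{}{
		"build": objx.Map{"image": "golang:1.5"},
	})

	// Present branch: the assertion matches the named Map type.
	fmt.Println(conf.Get("build").ObjxMap().Get("image").Str()) // golang:1.5

	// Missing branch: ObjxMap falls back to New(nil), which should still
	// answer Get safely (the tests above show nil Maps answer Has).
	fmt.Println(conf.Get("deploy").ObjxMap().Get("image").Str("scratch")) // scratch
}
```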
+func (v *Value) MustObjxMapSlice() [](Map) { + return v.data.([](Map)) +} + +// IsObjxMap gets whether the object contained is a (Map) or not. +func (v *Value) IsObjxMap() bool { + _, ok := v.data.((Map)) + return ok +} + +// IsObjxMapSlice gets whether the object contained is a [](Map) or not. +func (v *Value) IsObjxMapSlice() bool { + _, ok := v.data.([](Map)) + return ok +} + +// EachObjxMap calls the specified callback for each object +// in the [](Map). +// +// Panics if the object is the wrong type. +func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { + + for index, val := range v.MustObjxMapSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereObjxMap uses the specified decider function to select items +// from the [](Map). The object contained in the result will contain +// only the selected items. +func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { + + var selected [](Map) + + v.EachObjxMap(func(index int, val Map) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupObjxMap uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][](Map). +func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { + + groups := make(map[string][](Map)) + + v.EachObjxMap(func(index int, val Map) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([](Map), 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceObjxMap uses the specified function to replace each (Map)s +// by iterating each item. The data in the returned result will be a +// [](Map) containing the replaced items. +func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { + + arr := v.MustObjxMapSlice() + replaced := make([](Map), len(arr)) + + v.EachObjxMap(func(index int, val Map) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectObjxMap uses the specified collector function to collect a value +// for each of the (Map)s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { + + arr := v.MustObjxMapSlice() + collected := make([]interface{}, len(arr)) + + v.EachObjxMap(func(index int, val Map) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Bool (bool and []bool) + -------------------------------------------------- +*/ + +// Bool gets the value as a bool, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Bool(optionalDefault ...bool) bool { + if s, ok := v.data.(bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return false +} + +// MustBool gets the value as a bool. +// +// Panics if the object is not a bool. +func (v *Value) MustBool() bool { + return v.data.(bool) +} + +// BoolSlice gets the value as a []bool, returns the optionalDefault +// value or nil if the value is not a []bool. 
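All of the `Each*` variants share one contract: the callback's boolean return decides whether iteration continues, and the receiver is returned unchanged so calls chain. A minimal sketch with the `[]interface{}` flavor shown earlier, same `objx.New`/`Get` assumptions:

```go
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.New(map[string]interface{}{
		"steps": []interface{}{"FROM", "MOUNT", "RUN", "PUSH"},
	})

	// Returning false from the callback breaks out of the loop early,
	// so "PUSH" is never printed.
	m.Get("steps").EachInter(func(i int, step interface{}) bool {
		fmt.Println(i, step)
		return step != "RUN"
	})
}
```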
+func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool { + if s, ok := v.data.([]bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustBoolSlice gets the value as a []bool. +// +// Panics if the object is not a []bool. +func (v *Value) MustBoolSlice() []bool { + return v.data.([]bool) +} + +// IsBool gets whether the object contained is a bool or not. +func (v *Value) IsBool() bool { + _, ok := v.data.(bool) + return ok +} + +// IsBoolSlice gets whether the object contained is a []bool or not. +func (v *Value) IsBoolSlice() bool { + _, ok := v.data.([]bool) + return ok +} + +// EachBool calls the specified callback for each object +// in the []bool. +// +// Panics if the object is the wrong type. +func (v *Value) EachBool(callback func(int, bool) bool) *Value { + + for index, val := range v.MustBoolSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereBool uses the specified decider function to select items +// from the []bool. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereBool(decider func(int, bool) bool) *Value { + + var selected []bool + + v.EachBool(func(index int, val bool) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupBool uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]bool. +func (v *Value) GroupBool(grouper func(int, bool) string) *Value { + + groups := make(map[string][]bool) + + v.EachBool(func(index int, val bool) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]bool, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceBool uses the specified function to replace each bools +// by iterating each item. The data in the returned result will be a +// []bool containing the replaced items. +func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value { + + arr := v.MustBoolSlice() + replaced := make([]bool, len(arr)) + + v.EachBool(func(index int, val bool) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectBool uses the specified collector function to collect a value +// for each of the bools in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { + + arr := v.MustBoolSlice() + collected := make([]interface{}, len(arr)) + + v.EachBool(func(index int, val bool) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Str (string and []string) + -------------------------------------------------- +*/ + +// Str gets the value as a string, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Str(optionalDefault ...string) string { + if s, ok := v.data.(string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return "" +} + +// MustStr gets the value as a string. +// +// Panics if the object is not a string. 
+func (v *Value) MustStr() string { + return v.data.(string) +} + +// StrSlice gets the value as a []string, returns the optionalDefault +// value or nil if the value is not a []string. +func (v *Value) StrSlice(optionalDefault ...[]string) []string { + if s, ok := v.data.([]string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustStrSlice gets the value as a []string. +// +// Panics if the object is not a []string. +func (v *Value) MustStrSlice() []string { + return v.data.([]string) +} + +// IsStr gets whether the object contained is a string or not. +func (v *Value) IsStr() bool { + _, ok := v.data.(string) + return ok +} + +// IsStrSlice gets whether the object contained is a []string or not. +func (v *Value) IsStrSlice() bool { + _, ok := v.data.([]string) + return ok +} + +// EachStr calls the specified callback for each object +// in the []string. +// +// Panics if the object is the wrong type. +func (v *Value) EachStr(callback func(int, string) bool) *Value { + + for index, val := range v.MustStrSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereStr uses the specified decider function to select items +// from the []string. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereStr(decider func(int, string) bool) *Value { + + var selected []string + + v.EachStr(func(index int, val string) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupStr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]string. +func (v *Value) GroupStr(grouper func(int, string) string) *Value { + + groups := make(map[string][]string) + + v.EachStr(func(index int, val string) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]string, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceStr uses the specified function to replace each strings +// by iterating each item. The data in the returned result will be a +// []string containing the replaced items. +func (v *Value) ReplaceStr(replacer func(int, string) string) *Value { + + arr := v.MustStrSlice() + replaced := make([]string, len(arr)) + + v.EachStr(func(index int, val string) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectStr uses the specified collector function to collect a value +// for each of the strings in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { + + arr := v.MustStrSlice() + collected := make([]interface{}, len(arr)) + + v.EachStr(func(index int, val string) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int (int and []int) + -------------------------------------------------- +*/ + +// Int gets the value as a int, returns the optionalDefault +// value or a system default object if the value is the wrong type. 
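Note a quirk in the generated `Where*` helpers: their doc comments promise "only the selected items", but the body appends a value when the decider returns false, so the selection is effectively inverted. The sketch below shows the behavior of the code as written, not the documented one (same assumptions as the earlier sketches):

```go
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.New(map[string]interface{}{
		"words": []string{"go", "objx", "rocker"},
	})

	// The decider "selects" long words, yet as generated the result
	// keeps the items for which it returned false.
	short := m.Get("words").WhereStr(func(i int, s string) bool {
		return len(s) > 2
	})
	fmt.Println(short.MustStrSlice()) // [go]
}
```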
+func (v *Value) Int(optionalDefault ...int) int { + if s, ok := v.data.(int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt gets the value as a int. +// +// Panics if the object is not a int. +func (v *Value) MustInt() int { + return v.data.(int) +} + +// IntSlice gets the value as a []int, returns the optionalDefault +// value or nil if the value is not a []int. +func (v *Value) IntSlice(optionalDefault ...[]int) []int { + if s, ok := v.data.([]int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustIntSlice gets the value as a []int. +// +// Panics if the object is not a []int. +func (v *Value) MustIntSlice() []int { + return v.data.([]int) +} + +// IsInt gets whether the object contained is a int or not. +func (v *Value) IsInt() bool { + _, ok := v.data.(int) + return ok +} + +// IsIntSlice gets whether the object contained is a []int or not. +func (v *Value) IsIntSlice() bool { + _, ok := v.data.([]int) + return ok +} + +// EachInt calls the specified callback for each object +// in the []int. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt(callback func(int, int) bool) *Value { + + for index, val := range v.MustIntSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt uses the specified decider function to select items +// from the []int. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt(decider func(int, int) bool) *Value { + + var selected []int + + v.EachInt(func(index int, val int) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int. +func (v *Value) GroupInt(grouper func(int, int) string) *Value { + + groups := make(map[string][]int) + + v.EachInt(func(index int, val int) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt uses the specified function to replace each ints +// by iterating each item. The data in the returned result will be a +// []int containing the replaced items. +func (v *Value) ReplaceInt(replacer func(int, int) int) *Value { + + arr := v.MustIntSlice() + replaced := make([]int, len(arr)) + + v.EachInt(func(index int, val int) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt uses the specified collector function to collect a value +// for each of the ints in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { + + arr := v.MustIntSlice() + collected := make([]interface{}, len(arr)) + + v.EachInt(func(index int, val int) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int8 (int8 and []int8) + -------------------------------------------------- +*/ + +// Int8 gets the value as a int8, returns the optionalDefault +// value or a system default object if the value is the wrong type. 
+func (v *Value) Int8(optionalDefault ...int8) int8 { + if s, ok := v.data.(int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt8 gets the value as a int8. +// +// Panics if the object is not a int8. +func (v *Value) MustInt8() int8 { + return v.data.(int8) +} + +// Int8Slice gets the value as a []int8, returns the optionalDefault +// value or nil if the value is not a []int8. +func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 { + if s, ok := v.data.([]int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt8Slice gets the value as a []int8. +// +// Panics if the object is not a []int8. +func (v *Value) MustInt8Slice() []int8 { + return v.data.([]int8) +} + +// IsInt8 gets whether the object contained is a int8 or not. +func (v *Value) IsInt8() bool { + _, ok := v.data.(int8) + return ok +} + +// IsInt8Slice gets whether the object contained is a []int8 or not. +func (v *Value) IsInt8Slice() bool { + _, ok := v.data.([]int8) + return ok +} + +// EachInt8 calls the specified callback for each object +// in the []int8. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt8(callback func(int, int8) bool) *Value { + + for index, val := range v.MustInt8Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt8 uses the specified decider function to select items +// from the []int8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt8(decider func(int, int8) bool) *Value { + + var selected []int8 + + v.EachInt8(func(index int, val int8) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int8. +func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { + + groups := make(map[string][]int8) + + v.EachInt8(func(index int, val int8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt8 uses the specified function to replace each int8s +// by iterating each item. The data in the returned result will be a +// []int8 containing the replaced items. +func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value { + + arr := v.MustInt8Slice() + replaced := make([]int8, len(arr)) + + v.EachInt8(func(index int, val int8) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt8 uses the specified collector function to collect a value +// for each of the int8s in the slice. The data returned will be a +// []interface{}. 
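The `Group*` helpers bucket slice items under whatever string the grouper returns. The resulting `Value` wraps a `map[string][]T`, for which there is no typed getter in this file, so the sketch below just prints it via `fmt` (same assumptions as above):

```go
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.New(map[string]interface{}{"n": []int{1, 2, 3, 4, 5}})

	// Wraps a map[string][]int keyed by the grouper's return value.
	grouped := m.Get("n").GroupInt(func(i, n int) string {
		if n%2 == 0 {
			return "even"
		}
		return "odd"
	})
	fmt.Println(grouped) // e.g. &{map[even:[2 4] odd:[1 3 5]]}
}
```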
+func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { + + arr := v.MustInt8Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt8(func(index int, val int8) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int16 (int16 and []int16) + -------------------------------------------------- +*/ + +// Int16 gets the value as a int16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int16(optionalDefault ...int16) int16 { + if s, ok := v.data.(int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt16 gets the value as a int16. +// +// Panics if the object is not a int16. +func (v *Value) MustInt16() int16 { + return v.data.(int16) +} + +// Int16Slice gets the value as a []int16, returns the optionalDefault +// value or nil if the value is not a []int16. +func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 { + if s, ok := v.data.([]int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt16Slice gets the value as a []int16. +// +// Panics if the object is not a []int16. +func (v *Value) MustInt16Slice() []int16 { + return v.data.([]int16) +} + +// IsInt16 gets whether the object contained is a int16 or not. +func (v *Value) IsInt16() bool { + _, ok := v.data.(int16) + return ok +} + +// IsInt16Slice gets whether the object contained is a []int16 or not. +func (v *Value) IsInt16Slice() bool { + _, ok := v.data.([]int16) + return ok +} + +// EachInt16 calls the specified callback for each object +// in the []int16. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt16(callback func(int, int16) bool) *Value { + + for index, val := range v.MustInt16Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt16 uses the specified decider function to select items +// from the []int16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt16(decider func(int, int16) bool) *Value { + + var selected []int16 + + v.EachInt16(func(index int, val int16) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int16. +func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { + + groups := make(map[string][]int16) + + v.EachInt16(func(index int, val int16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt16 uses the specified function to replace each int16s +// by iterating each item. The data in the returned result will be a +// []int16 containing the replaced items. 
+func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value { + + arr := v.MustInt16Slice() + replaced := make([]int16, len(arr)) + + v.EachInt16(func(index int, val int16) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt16 uses the specified collector function to collect a value +// for each of the int16s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { + + arr := v.MustInt16Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt16(func(index int, val int16) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int32 (int32 and []int32) + -------------------------------------------------- +*/ + +// Int32 gets the value as a int32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int32(optionalDefault ...int32) int32 { + if s, ok := v.data.(int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt32 gets the value as a int32. +// +// Panics if the object is not a int32. +func (v *Value) MustInt32() int32 { + return v.data.(int32) +} + +// Int32Slice gets the value as a []int32, returns the optionalDefault +// value or nil if the value is not a []int32. +func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 { + if s, ok := v.data.([]int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt32Slice gets the value as a []int32. +// +// Panics if the object is not a []int32. +func (v *Value) MustInt32Slice() []int32 { + return v.data.([]int32) +} + +// IsInt32 gets whether the object contained is a int32 or not. +func (v *Value) IsInt32() bool { + _, ok := v.data.(int32) + return ok +} + +// IsInt32Slice gets whether the object contained is a []int32 or not. +func (v *Value) IsInt32Slice() bool { + _, ok := v.data.([]int32) + return ok +} + +// EachInt32 calls the specified callback for each object +// in the []int32. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt32(callback func(int, int32) bool) *Value { + + for index, val := range v.MustInt32Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt32 uses the specified decider function to select items +// from the []int32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt32(decider func(int, int32) bool) *Value { + + var selected []int32 + + v.EachInt32(func(index int, val int32) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int32. 
+func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { + + groups := make(map[string][]int32) + + v.EachInt32(func(index int, val int32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt32 uses the specified function to replace each int32s +// by iterating each item. The data in the returned result will be a +// []int32 containing the replaced items. +func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value { + + arr := v.MustInt32Slice() + replaced := make([]int32, len(arr)) + + v.EachInt32(func(index int, val int32) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt32 uses the specified collector function to collect a value +// for each of the int32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { + + arr := v.MustInt32Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt32(func(index int, val int32) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int64 (int64 and []int64) + -------------------------------------------------- +*/ + +// Int64 gets the value as a int64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int64(optionalDefault ...int64) int64 { + if s, ok := v.data.(int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt64 gets the value as a int64. +// +// Panics if the object is not a int64. +func (v *Value) MustInt64() int64 { + return v.data.(int64) +} + +// Int64Slice gets the value as a []int64, returns the optionalDefault +// value or nil if the value is not a []int64. +func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 { + if s, ok := v.data.([]int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt64Slice gets the value as a []int64. +// +// Panics if the object is not a []int64. +func (v *Value) MustInt64Slice() []int64 { + return v.data.([]int64) +} + +// IsInt64 gets whether the object contained is a int64 or not. +func (v *Value) IsInt64() bool { + _, ok := v.data.(int64) + return ok +} + +// IsInt64Slice gets whether the object contained is a []int64 or not. +func (v *Value) IsInt64Slice() bool { + _, ok := v.data.([]int64) + return ok +} + +// EachInt64 calls the specified callback for each object +// in the []int64. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt64(callback func(int, int64) bool) *Value { + + for index, val := range v.MustInt64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt64 uses the specified decider function to select items +// from the []int64. The object contained in the result will contain +// only the selected items. 
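`Replace*` and `Collect*` differ only in their output type: the former maps a slice onto a slice of the same element type, while the latter gathers arbitrary values into an `[]interface{}`. For example, with the `int` family (same assumptions as the earlier sketches):

```go
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.New(map[string]interface{}{"n": []int{1, 2, 3}})

	// ReplaceInt: []int in, []int out.
	doubled := m.Get("n").ReplaceInt(func(i, n int) int { return n * 2 })
	fmt.Println(doubled.MustIntSlice()) // [2 4 6]

	// CollectInt: []int in, []interface{} out.
	labels := m.Get("n").CollectInt(func(i, n int) interface{} {
		return fmt.Sprintf("item-%d", n)
	})
	fmt.Println(labels.MustInterSlice()) // [item-1 item-2 item-3]
}
```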
+func (v *Value) WhereInt64(decider func(int, int64) bool) *Value { + + var selected []int64 + + v.EachInt64(func(index int, val int64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int64. +func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { + + groups := make(map[string][]int64) + + v.EachInt64(func(index int, val int64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt64 uses the specified function to replace each int64s +// by iterating each item. The data in the returned result will be a +// []int64 containing the replaced items. +func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value { + + arr := v.MustInt64Slice() + replaced := make([]int64, len(arr)) + + v.EachInt64(func(index int, val int64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt64 uses the specified collector function to collect a value +// for each of the int64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { + + arr := v.MustInt64Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt64(func(index int, val int64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint (uint and []uint) + -------------------------------------------------- +*/ + +// Uint gets the value as a uint, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint(optionalDefault ...uint) uint { + if s, ok := v.data.(uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint gets the value as a uint. +// +// Panics if the object is not a uint. +func (v *Value) MustUint() uint { + return v.data.(uint) +} + +// UintSlice gets the value as a []uint, returns the optionalDefault +// value or nil if the value is not a []uint. +func (v *Value) UintSlice(optionalDefault ...[]uint) []uint { + if s, ok := v.data.([]uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintSlice gets the value as a []uint. +// +// Panics if the object is not a []uint. +func (v *Value) MustUintSlice() []uint { + return v.data.([]uint) +} + +// IsUint gets whether the object contained is a uint or not. +func (v *Value) IsUint() bool { + _, ok := v.data.(uint) + return ok +} + +// IsUintSlice gets whether the object contained is a []uint or not. +func (v *Value) IsUintSlice() bool { + _, ok := v.data.([]uint) + return ok +} + +// EachUint calls the specified callback for each object +// in the []uint. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint(callback func(int, uint) bool) *Value { + + for index, val := range v.MustUintSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint uses the specified decider function to select items +// from the []uint. 
The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint(decider func(int, uint) bool) *Value { + + var selected []uint + + v.EachUint(func(index int, val uint) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint. +func (v *Value) GroupUint(grouper func(int, uint) string) *Value { + + groups := make(map[string][]uint) + + v.EachUint(func(index int, val uint) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint uses the specified function to replace each uints +// by iterating each item. The data in the returned result will be a +// []uint containing the replaced items. +func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value { + + arr := v.MustUintSlice() + replaced := make([]uint, len(arr)) + + v.EachUint(func(index int, val uint) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint uses the specified collector function to collect a value +// for each of the uints in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { + + arr := v.MustUintSlice() + collected := make([]interface{}, len(arr)) + + v.EachUint(func(index int, val uint) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint8 (uint8 and []uint8) + -------------------------------------------------- +*/ + +// Uint8 gets the value as a uint8, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint8(optionalDefault ...uint8) uint8 { + if s, ok := v.data.(uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint8 gets the value as a uint8. +// +// Panics if the object is not a uint8. +func (v *Value) MustUint8() uint8 { + return v.data.(uint8) +} + +// Uint8Slice gets the value as a []uint8, returns the optionalDefault +// value or nil if the value is not a []uint8. +func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 { + if s, ok := v.data.([]uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint8Slice gets the value as a []uint8. +// +// Panics if the object is not a []uint8. +func (v *Value) MustUint8Slice() []uint8 { + return v.data.([]uint8) +} + +// IsUint8 gets whether the object contained is a uint8 or not. +func (v *Value) IsUint8() bool { + _, ok := v.data.(uint8) + return ok +} + +// IsUint8Slice gets whether the object contained is a []uint8 or not. +func (v *Value) IsUint8Slice() bool { + _, ok := v.data.([]uint8) + return ok +} + +// EachUint8 calls the specified callback for each object +// in the []uint8. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachUint8(callback func(int, uint8) bool) *Value { + + for index, val := range v.MustUint8Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint8 uses the specified decider function to select items +// from the []uint8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value { + + var selected []uint8 + + v.EachUint8(func(index int, val uint8) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint8. +func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { + + groups := make(map[string][]uint8) + + v.EachUint8(func(index int, val uint8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint8 uses the specified function to replace each uint8s +// by iterating each item. The data in the returned result will be a +// []uint8 containing the replaced items. +func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value { + + arr := v.MustUint8Slice() + replaced := make([]uint8, len(arr)) + + v.EachUint8(func(index int, val uint8) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint8 uses the specified collector function to collect a value +// for each of the uint8s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { + + arr := v.MustUint8Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint8(func(index int, val uint8) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint16 (uint16 and []uint16) + -------------------------------------------------- +*/ + +// Uint16 gets the value as a uint16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint16(optionalDefault ...uint16) uint16 { + if s, ok := v.data.(uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint16 gets the value as a uint16. +// +// Panics if the object is not a uint16. +func (v *Value) MustUint16() uint16 { + return v.data.(uint16) +} + +// Uint16Slice gets the value as a []uint16, returns the optionalDefault +// value or nil if the value is not a []uint16. +func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 { + if s, ok := v.data.([]uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint16Slice gets the value as a []uint16. +// +// Panics if the object is not a []uint16. +func (v *Value) MustUint16Slice() []uint16 { + return v.data.([]uint16) +} + +// IsUint16 gets whether the object contained is a uint16 or not. +func (v *Value) IsUint16() bool { + _, ok := v.data.(uint16) + return ok +} + +// IsUint16Slice gets whether the object contained is a []uint16 or not. 
+func (v *Value) IsUint16Slice() bool { + _, ok := v.data.([]uint16) + return ok +} + +// EachUint16 calls the specified callback for each object +// in the []uint16. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint16(callback func(int, uint16) bool) *Value { + + for index, val := range v.MustUint16Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint16 uses the specified decider function to select items +// from the []uint16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value { + + var selected []uint16 + + v.EachUint16(func(index int, val uint16) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint16. +func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { + + groups := make(map[string][]uint16) + + v.EachUint16(func(index int, val uint16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint16 uses the specified function to replace each uint16s +// by iterating each item. The data in the returned result will be a +// []uint16 containing the replaced items. +func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value { + + arr := v.MustUint16Slice() + replaced := make([]uint16, len(arr)) + + v.EachUint16(func(index int, val uint16) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint16 uses the specified collector function to collect a value +// for each of the uint16s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { + + arr := v.MustUint16Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint16(func(index int, val uint16) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint32 (uint32 and []uint32) + -------------------------------------------------- +*/ + +// Uint32 gets the value as a uint32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint32(optionalDefault ...uint32) uint32 { + if s, ok := v.data.(uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint32 gets the value as a uint32. +// +// Panics if the object is not a uint32. +func (v *Value) MustUint32() uint32 { + return v.data.(uint32) +} + +// Uint32Slice gets the value as a []uint32, returns the optionalDefault +// value or nil if the value is not a []uint32. +func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 { + if s, ok := v.data.([]uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint32Slice gets the value as a []uint32. +// +// Panics if the object is not a []uint32. 
+func (v *Value) MustUint32Slice() []uint32 { + return v.data.([]uint32) +} + +// IsUint32 gets whether the object contained is a uint32 or not. +func (v *Value) IsUint32() bool { + _, ok := v.data.(uint32) + return ok +} + +// IsUint32Slice gets whether the object contained is a []uint32 or not. +func (v *Value) IsUint32Slice() bool { + _, ok := v.data.([]uint32) + return ok +} + +// EachUint32 calls the specified callback for each object +// in the []uint32. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint32(callback func(int, uint32) bool) *Value { + + for index, val := range v.MustUint32Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint32 uses the specified decider function to select items +// from the []uint32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value { + + var selected []uint32 + + v.EachUint32(func(index int, val uint32) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint32. +func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { + + groups := make(map[string][]uint32) + + v.EachUint32(func(index int, val uint32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint32 uses the specified function to replace each uint32s +// by iterating each item. The data in the returned result will be a +// []uint32 containing the replaced items. +func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value { + + arr := v.MustUint32Slice() + replaced := make([]uint32, len(arr)) + + v.EachUint32(func(index int, val uint32) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint32 uses the specified collector function to collect a value +// for each of the uint32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { + + arr := v.MustUint32Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint32(func(index int, val uint32) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint64 (uint64 and []uint64) + -------------------------------------------------- +*/ + +// Uint64 gets the value as a uint64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint64(optionalDefault ...uint64) uint64 { + if s, ok := v.data.(uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint64 gets the value as a uint64. +// +// Panics if the object is not a uint64. +func (v *Value) MustUint64() uint64 { + return v.data.(uint64) +} + +// Uint64Slice gets the value as a []uint64, returns the optionalDefault +// value or nil if the value is not a []uint64. 
+func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 { + if s, ok := v.data.([]uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint64Slice gets the value as a []uint64. +// +// Panics if the object is not a []uint64. +func (v *Value) MustUint64Slice() []uint64 { + return v.data.([]uint64) +} + +// IsUint64 gets whether the object contained is a uint64 or not. +func (v *Value) IsUint64() bool { + _, ok := v.data.(uint64) + return ok +} + +// IsUint64Slice gets whether the object contained is a []uint64 or not. +func (v *Value) IsUint64Slice() bool { + _, ok := v.data.([]uint64) + return ok +} + +// EachUint64 calls the specified callback for each object +// in the []uint64. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint64(callback func(int, uint64) bool) *Value { + + for index, val := range v.MustUint64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint64 uses the specified decider function to select items +// from the []uint64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value { + + var selected []uint64 + + v.EachUint64(func(index int, val uint64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint64. +func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { + + groups := make(map[string][]uint64) + + v.EachUint64(func(index int, val uint64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint64 uses the specified function to replace each uint64s +// by iterating each item. The data in the returned result will be a +// []uint64 containing the replaced items. +func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value { + + arr := v.MustUint64Slice() + replaced := make([]uint64, len(arr)) + + v.EachUint64(func(index int, val uint64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint64 uses the specified collector function to collect a value +// for each of the uint64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { + + arr := v.MustUint64Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint64(func(index int, val uint64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uintptr (uintptr and []uintptr) + -------------------------------------------------- +*/ + +// Uintptr gets the value as a uintptr, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr { + if s, ok := v.data.(uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUintptr gets the value as a uintptr. +// +// Panics if the object is not a uintptr. 
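Every numeric width repeats the same template, so the slice getters behave identically from `int8` through `uint64`: on a type mismatch they return the optional default, or nil when none is given. For instance, with `Uint64Slice` (same assumptions as the earlier sketches):

```go
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.New(map[string]interface{}{"sizes": []uint64{512, 1024}})

	fmt.Println(m.Get("sizes").Uint64Slice())                  // [512 1024]
	fmt.Println(m.Get("missing").Uint64Slice())                // [] (nil slice)
	fmt.Println(m.Get("missing").Uint64Slice([]uint64{4096})) // [4096]
}
```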
+func (v *Value) MustUintptr() uintptr { + return v.data.(uintptr) +} + +// UintptrSlice gets the value as a []uintptr, returns the optionalDefault +// value or nil if the value is not a []uintptr. +func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr { + if s, ok := v.data.([]uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintptrSlice gets the value as a []uintptr. +// +// Panics if the object is not a []uintptr. +func (v *Value) MustUintptrSlice() []uintptr { + return v.data.([]uintptr) +} + +// IsUintptr gets whether the object contained is a uintptr or not. +func (v *Value) IsUintptr() bool { + _, ok := v.data.(uintptr) + return ok +} + +// IsUintptrSlice gets whether the object contained is a []uintptr or not. +func (v *Value) IsUintptrSlice() bool { + _, ok := v.data.([]uintptr) + return ok +} + +// EachUintptr calls the specified callback for each object +// in the []uintptr. +// +// Panics if the object is the wrong type. +func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value { + + for index, val := range v.MustUintptrSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUintptr uses the specified decider function to select items +// from the []uintptr. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value { + + var selected []uintptr + + v.EachUintptr(func(index int, val uintptr) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUintptr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uintptr. +func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { + + groups := make(map[string][]uintptr) + + v.EachUintptr(func(index int, val uintptr) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uintptr, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUintptr uses the specified function to replace each uintptrs +// by iterating each item. The data in the returned result will be a +// []uintptr containing the replaced items. +func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value { + + arr := v.MustUintptrSlice() + replaced := make([]uintptr, len(arr)) + + v.EachUintptr(func(index int, val uintptr) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUintptr uses the specified collector function to collect a value +// for each of the uintptrs in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value { + + arr := v.MustUintptrSlice() + collected := make([]interface{}, len(arr)) + + v.EachUintptr(func(index int, val uintptr) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Float32 (float32 and []float32) + -------------------------------------------------- +*/ + +// Float32 gets the value as a float32, returns the optionalDefault +// value or a system default object if the value is the wrong type. 
+func (v *Value) Float32(optionalDefault ...float32) float32 { + if s, ok := v.data.(float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat32 gets the value as a float32. +// +// Panics if the object is not a float32. +func (v *Value) MustFloat32() float32 { + return v.data.(float32) +} + +// Float32Slice gets the value as a []float32, returns the optionalDefault +// value or nil if the value is not a []float32. +func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 { + if s, ok := v.data.([]float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat32Slice gets the value as a []float32. +// +// Panics if the object is not a []float32. +func (v *Value) MustFloat32Slice() []float32 { + return v.data.([]float32) +} + +// IsFloat32 gets whether the object contained is a float32 or not. +func (v *Value) IsFloat32() bool { + _, ok := v.data.(float32) + return ok +} + +// IsFloat32Slice gets whether the object contained is a []float32 or not. +func (v *Value) IsFloat32Slice() bool { + _, ok := v.data.([]float32) + return ok +} + +// EachFloat32 calls the specified callback for each object +// in the []float32. +// +// Panics if the object is the wrong type. +func (v *Value) EachFloat32(callback func(int, float32) bool) *Value { + + for index, val := range v.MustFloat32Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereFloat32 uses the specified decider function to select items +// from the []float32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value { + + var selected []float32 + + v.EachFloat32(func(index int, val float32) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupFloat32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float32. +func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { + + groups := make(map[string][]float32) + + v.EachFloat32(func(index int, val float32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceFloat32 uses the specified function to replace each float32s +// by iterating each item. The data in the returned result will be a +// []float32 containing the replaced items. +func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value { + + arr := v.MustFloat32Slice() + replaced := make([]float32, len(arr)) + + v.EachFloat32(func(index int, val float32) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectFloat32 uses the specified collector function to collect a value +// for each of the float32s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value { + + arr := v.MustFloat32Slice() + collected := make([]interface{}, len(arr)) + + v.EachFloat32(func(index int, val float32) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Float64 (float64 and []float64) + -------------------------------------------------- +*/ + +// Float64 gets the value as a float64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Float64(optionalDefault ...float64) float64 { + if s, ok := v.data.(float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat64 gets the value as a float64. +// +// Panics if the object is not a float64. +func (v *Value) MustFloat64() float64 { + return v.data.(float64) +} + +// Float64Slice gets the value as a []float64, returns the optionalDefault +// value or nil if the value is not a []float64. +func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 { + if s, ok := v.data.([]float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat64Slice gets the value as a []float64. +// +// Panics if the object is not a []float64. +func (v *Value) MustFloat64Slice() []float64 { + return v.data.([]float64) +} + +// IsFloat64 gets whether the object contained is a float64 or not. +func (v *Value) IsFloat64() bool { + _, ok := v.data.(float64) + return ok +} + +// IsFloat64Slice gets whether the object contained is a []float64 or not. +func (v *Value) IsFloat64Slice() bool { + _, ok := v.data.([]float64) + return ok +} + +// EachFloat64 calls the specified callback for each object +// in the []float64. +// +// Panics if the object is the wrong type. +func (v *Value) EachFloat64(callback func(int, float64) bool) *Value { + + for index, val := range v.MustFloat64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereFloat64 uses the specified decider function to select items +// from the []float64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value { + + var selected []float64 + + v.EachFloat64(func(index int, val float64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupFloat64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float64. +func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { + + groups := make(map[string][]float64) + + v.EachFloat64(func(index int, val float64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceFloat64 uses the specified function to replace each float64s +// by iterating each item. The data in the returned result will be a +// []float64 containing the replaced items. 
+func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value { + + arr := v.MustFloat64Slice() + replaced := make([]float64, len(arr)) + + v.EachFloat64(func(index int, val float64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectFloat64 uses the specified collector function to collect a value +// for each of the float64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value { + + arr := v.MustFloat64Slice() + collected := make([]interface{}, len(arr)) + + v.EachFloat64(func(index int, val float64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Complex64 (complex64 and []complex64) + -------------------------------------------------- +*/ + +// Complex64 gets the value as a complex64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex64(optionalDefault ...complex64) complex64 { + if s, ok := v.data.(complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex64 gets the value as a complex64. +// +// Panics if the object is not a complex64. +func (v *Value) MustComplex64() complex64 { + return v.data.(complex64) +} + +// Complex64Slice gets the value as a []complex64, returns the optionalDefault +// value or nil if the value is not a []complex64. +func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 { + if s, ok := v.data.([]complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex64Slice gets the value as a []complex64. +// +// Panics if the object is not a []complex64. +func (v *Value) MustComplex64Slice() []complex64 { + return v.data.([]complex64) +} + +// IsComplex64 gets whether the object contained is a complex64 or not. +func (v *Value) IsComplex64() bool { + _, ok := v.data.(complex64) + return ok +} + +// IsComplex64Slice gets whether the object contained is a []complex64 or not. +func (v *Value) IsComplex64Slice() bool { + _, ok := v.data.([]complex64) + return ok +} + +// EachComplex64 calls the specified callback for each object +// in the []complex64. +// +// Panics if the object is the wrong type. +func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value { + + for index, val := range v.MustComplex64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereComplex64 uses the specified decider function to select items +// from the []complex64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value { + + var selected []complex64 + + v.EachComplex64(func(index int, val complex64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupComplex64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex64. 
+func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { + + groups := make(map[string][]complex64) + + v.EachComplex64(func(index int, val complex64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceComplex64 uses the specified function to replace each complex64s +// by iterating each item. The data in the returned result will be a +// []complex64 containing the replaced items. +func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value { + + arr := v.MustComplex64Slice() + replaced := make([]complex64, len(arr)) + + v.EachComplex64(func(index int, val complex64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectComplex64 uses the specified collector function to collect a value +// for each of the complex64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value { + + arr := v.MustComplex64Slice() + collected := make([]interface{}, len(arr)) + + v.EachComplex64(func(index int, val complex64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Complex128 (complex128 and []complex128) + -------------------------------------------------- +*/ + +// Complex128 gets the value as a complex128, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex128(optionalDefault ...complex128) complex128 { + if s, ok := v.data.(complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex128 gets the value as a complex128. +// +// Panics if the object is not a complex128. +func (v *Value) MustComplex128() complex128 { + return v.data.(complex128) +} + +// Complex128Slice gets the value as a []complex128, returns the optionalDefault +// value or nil if the value is not a []complex128. +func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 { + if s, ok := v.data.([]complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex128Slice gets the value as a []complex128. +// +// Panics if the object is not a []complex128. +func (v *Value) MustComplex128Slice() []complex128 { + return v.data.([]complex128) +} + +// IsComplex128 gets whether the object contained is a complex128 or not. +func (v *Value) IsComplex128() bool { + _, ok := v.data.(complex128) + return ok +} + +// IsComplex128Slice gets whether the object contained is a []complex128 or not. +func (v *Value) IsComplex128Slice() bool { + _, ok := v.data.([]complex128) + return ok +} + +// EachComplex128 calls the specified callback for each object +// in the []complex128. +// +// Panics if the object is the wrong type. +func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value { + + for index, val := range v.MustComplex128Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereComplex128 uses the specified decider function to select items +// from the []complex128. The object contained in the result will contain +// only the selected items. 
+func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value { + + var selected []complex128 + + v.EachComplex128(func(index int, val complex128) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupComplex128 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex128. +func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { + + groups := make(map[string][]complex128) + + v.EachComplex128(func(index int, val complex128) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex128, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceComplex128 uses the specified function to replace each complex128s +// by iterating each item. The data in the returned result will be a +// []complex128 containing the replaced items. +func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value { + + arr := v.MustComplex128Slice() + replaced := make([]complex128, len(arr)) + + v.EachComplex128(func(index int, val complex128) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectComplex128 uses the specified collector function to collect a value +// for each of the complex128s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value { + + arr := v.MustComplex128Slice() + collected := make([]interface{}, len(arr)) + + v.EachComplex128(func(index int, val complex128) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} diff --git a/vendor/src/github.com/stretchr/objx/type_specific_codegen_test.go b/vendor/src/github.com/stretchr/objx/type_specific_codegen_test.go new file mode 100644 index 00000000..f7a4fcee --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/type_specific_codegen_test.go @@ -0,0 +1,2867 @@ +package objx + +import ( + "fmt" + "github.com/stretchr/testify/assert" + "testing" +) + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInter(t *testing.T) { + + val := interface{}("something") + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Inter()) + assert.Equal(t, val, New(m).Get("value").MustInter()) + assert.Equal(t, interface{}(nil), New(m).Get("nothing").Inter()) + assert.Equal(t, val, New(m).Get("nothing").Inter("something")) + + assert.Panics(t, func() { + New(m).Get("age").MustInter() + }) + +} + +func TestInterSlice(t *testing.T) { + + val := interface{}("something") + m := map[string]interface{}{"value": []interface{}{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").InterSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustInterSlice()[0]) + assert.Equal(t, []interface{}(nil), New(m).Get("nothing").InterSlice()) + assert.Equal(t, val, New(m).Get("nothing").InterSlice([]interface{}{interface{}("something")})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustInterSlice() + }) + +} + +func TestIsInter(t *testing.T) { + + var v *Value + + v = &Value{data: interface{}("something")} + 
assert.True(t, v.IsInter()) + + v = &Value{data: []interface{}{interface{}("something")}} + assert.True(t, v.IsInterSlice()) + +} + +func TestEachInter(t *testing.T) { + + v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + count := 0 + replacedVals := make([]interface{}, 0) + assert.Equal(t, v, v.EachInter(func(i int, val interface{}) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustInterSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustInterSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustInterSlice()[2]) + +} + +func TestWhereInter(t *testing.T) { + + v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + + selected := v.WhereInter(func(i int, val interface{}) bool { + return i%2 == 0 + }).MustInterSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInter(t *testing.T) { + + v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + + grouped := v.GroupInter(func(i int, val interface{}) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]interface{}) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInter(t *testing.T) { + + v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + + rawArr := v.MustInterSlice() + + replaced := v.ReplaceInter(func(index int, val interface{}) interface{} { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustInterSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInter(t *testing.T) { + + v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + + collected := v.CollectInter(func(index int, val interface{}) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestMSI(t *testing.T) { + + val := map[string]interface{}(map[string]interface{}{"name": "Tyler"}) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").MSI()) + assert.Equal(t, val, New(m).Get("value").MustMSI()) + 
assert.Equal(t, map[string]interface{}(nil), New(m).Get("nothing").MSI()) + assert.Equal(t, val, New(m).Get("nothing").MSI(map[string]interface{}{"name": "Tyler"})) + + assert.Panics(t, func() { + New(m).Get("age").MustMSI() + }) + +} + +func TestMSISlice(t *testing.T) { + + val := map[string]interface{}(map[string]interface{}{"name": "Tyler"}) + m := map[string]interface{}{"value": []map[string]interface{}{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").MSISlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustMSISlice()[0]) + assert.Equal(t, []map[string]interface{}(nil), New(m).Get("nothing").MSISlice()) + assert.Equal(t, val, New(m).Get("nothing").MSISlice([]map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustMSISlice() + }) + +} + +func TestIsMSI(t *testing.T) { + + var v *Value + + v = &Value{data: map[string]interface{}(map[string]interface{}{"name": "Tyler"})} + assert.True(t, v.IsMSI()) + + v = &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + assert.True(t, v.IsMSISlice()) + +} + +func TestEachMSI(t *testing.T) { + + v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + count := 0 + replacedVals := make([]map[string]interface{}, 0) + assert.Equal(t, v, v.EachMSI(func(i int, val map[string]interface{}) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustMSISlice()[0]) + assert.Equal(t, replacedVals[1], v.MustMSISlice()[1]) + assert.Equal(t, replacedVals[2], v.MustMSISlice()[2]) + +} + +func TestWhereMSI(t *testing.T) { + + v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + selected := v.WhereMSI(func(i int, val map[string]interface{}) bool { + return i%2 == 0 + }).MustMSISlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupMSI(t *testing.T) { + + v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + grouped := v.GroupMSI(func(i int, val map[string]interface{}) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]map[string]interface{}) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceMSI(t *testing.T) { + + v := &Value{data: 
[]map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + rawArr := v.MustMSISlice() + + replaced := v.ReplaceMSI(func(index int, val map[string]interface{}) map[string]interface{} { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustMSISlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectMSI(t *testing.T) { + + v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + collected := v.CollectMSI(func(index int, val map[string]interface{}) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestObjxMap(t *testing.T) { + + val := (Map)(New(1)) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").ObjxMap()) + assert.Equal(t, val, New(m).Get("value").MustObjxMap()) + assert.Equal(t, (Map)(New(nil)), New(m).Get("nothing").ObjxMap()) + assert.Equal(t, val, New(m).Get("nothing").ObjxMap(New(1))) + + assert.Panics(t, func() { + New(m).Get("age").MustObjxMap() + }) + +} + +func TestObjxMapSlice(t *testing.T) { + + val := (Map)(New(1)) + m := map[string]interface{}{"value": [](Map){val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").ObjxMapSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustObjxMapSlice()[0]) + assert.Equal(t, [](Map)(nil), New(m).Get("nothing").ObjxMapSlice()) + assert.Equal(t, val, New(m).Get("nothing").ObjxMapSlice([](Map){(Map)(New(1))})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustObjxMapSlice() + }) + +} + +func TestIsObjxMap(t *testing.T) { + + var v *Value + + v = &Value{data: (Map)(New(1))} + assert.True(t, v.IsObjxMap()) + + v = &Value{data: [](Map){(Map)(New(1))}} + assert.True(t, v.IsObjxMapSlice()) + +} + +func TestEachObjxMap(t *testing.T) { + + v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} + count := 0 + replacedVals := make([](Map), 0) + assert.Equal(t, v, v.EachObjxMap(func(i int, val Map) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return 
false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustObjxMapSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustObjxMapSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustObjxMapSlice()[2]) + +} + +func TestWhereObjxMap(t *testing.T) { + + v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} + + selected := v.WhereObjxMap(func(i int, val Map) bool { + return i%2 == 0 + }).MustObjxMapSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupObjxMap(t *testing.T) { + + v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} + + grouped := v.GroupObjxMap(func(i int, val Map) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][](Map)) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceObjxMap(t *testing.T) { + + v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} + + rawArr := v.MustObjxMapSlice() + + replaced := v.ReplaceObjxMap(func(index int, val Map) Map { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustObjxMapSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectObjxMap(t *testing.T) { + + v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} + + collected := v.CollectObjxMap(func(index int, val Map) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestBool(t *testing.T) { + + val := bool(true) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Bool()) + assert.Equal(t, val, New(m).Get("value").MustBool()) + assert.Equal(t, bool(false), New(m).Get("nothing").Bool()) + assert.Equal(t, val, New(m).Get("nothing").Bool(true)) + + assert.Panics(t, func() { + New(m).Get("age").MustBool() + }) + +} + +func TestBoolSlice(t *testing.T) { + + val := bool(true) + m := map[string]interface{}{"value": []bool{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").BoolSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustBoolSlice()[0]) + assert.Equal(t, []bool(nil), New(m).Get("nothing").BoolSlice()) + assert.Equal(t, val, New(m).Get("nothing").BoolSlice([]bool{bool(true)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustBoolSlice() + }) + +} + +func TestIsBool(t *testing.T) { + + var v *Value + + v = &Value{data: bool(true)} + assert.True(t, v.IsBool()) + + v = &Value{data: []bool{bool(true)}} + assert.True(t, v.IsBoolSlice()) + +} + +func TestEachBool(t *testing.T) { + + v := &Value{data: 
[]bool{bool(true), bool(true), bool(true), bool(true), bool(true)}} + count := 0 + replacedVals := make([]bool, 0) + assert.Equal(t, v, v.EachBool(func(i int, val bool) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustBoolSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustBoolSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustBoolSlice()[2]) + +} + +func TestWhereBool(t *testing.T) { + + v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} + + selected := v.WhereBool(func(i int, val bool) bool { + return i%2 == 0 + }).MustBoolSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupBool(t *testing.T) { + + v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} + + grouped := v.GroupBool(func(i int, val bool) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]bool) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceBool(t *testing.T) { + + v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} + + rawArr := v.MustBoolSlice() + + replaced := v.ReplaceBool(func(index int, val bool) bool { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustBoolSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectBool(t *testing.T) { + + v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} + + collected := v.CollectBool(func(index int, val bool) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestStr(t *testing.T) { + + val := string("hello") + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Str()) + assert.Equal(t, val, New(m).Get("value").MustStr()) + assert.Equal(t, string(""), New(m).Get("nothing").Str()) + assert.Equal(t, val, New(m).Get("nothing").Str("hello")) + + assert.Panics(t, func() { + New(m).Get("age").MustStr() + }) + +} + +func TestStrSlice(t *testing.T) { + + val := string("hello") + m := map[string]interface{}{"value": []string{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").StrSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustStrSlice()[0]) + assert.Equal(t, []string(nil), New(m).Get("nothing").StrSlice()) + assert.Equal(t, val, New(m).Get("nothing").StrSlice([]string{string("hello")})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustStrSlice() + }) + +} + +func TestIsStr(t *testing.T) { + + var v *Value + + v = &Value{data: 
string("hello")} + assert.True(t, v.IsStr()) + + v = &Value{data: []string{string("hello")}} + assert.True(t, v.IsStrSlice()) + +} + +func TestEachStr(t *testing.T) { + + v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + count := 0 + replacedVals := make([]string, 0) + assert.Equal(t, v, v.EachStr(func(i int, val string) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustStrSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustStrSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustStrSlice()[2]) + +} + +func TestWhereStr(t *testing.T) { + + v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + + selected := v.WhereStr(func(i int, val string) bool { + return i%2 == 0 + }).MustStrSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupStr(t *testing.T) { + + v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + + grouped := v.GroupStr(func(i int, val string) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]string) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceStr(t *testing.T) { + + v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + + rawArr := v.MustStrSlice() + + replaced := v.ReplaceStr(func(index int, val string) string { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustStrSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectStr(t *testing.T) { + + v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + + collected := v.CollectStr(func(index int, val string) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInt(t *testing.T) { + + val := int(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int()) + assert.Equal(t, val, New(m).Get("value").MustInt()) + assert.Equal(t, int(0), New(m).Get("nothing").Int()) + assert.Equal(t, val, New(m).Get("nothing").Int(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustInt() + }) + +} + +func TestIntSlice(t *testing.T) { + + val := int(1) + m := map[string]interface{}{"value": []int{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").IntSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustIntSlice()[0]) 
+ assert.Equal(t, []int(nil), New(m).Get("nothing").IntSlice()) + assert.Equal(t, val, New(m).Get("nothing").IntSlice([]int{int(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustIntSlice() + }) + +} + +func TestIsInt(t *testing.T) { + + var v *Value + + v = &Value{data: int(1)} + assert.True(t, v.IsInt()) + + v = &Value{data: []int{int(1)}} + assert.True(t, v.IsIntSlice()) + +} + +func TestEachInt(t *testing.T) { + + v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1)}} + count := 0 + replacedVals := make([]int, 0) + assert.Equal(t, v, v.EachInt(func(i int, val int) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustIntSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustIntSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustIntSlice()[2]) + +} + +func TestWhereInt(t *testing.T) { + + v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} + + selected := v.WhereInt(func(i int, val int) bool { + return i%2 == 0 + }).MustIntSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInt(t *testing.T) { + + v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} + + grouped := v.GroupInt(func(i int, val int) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]int) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInt(t *testing.T) { + + v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} + + rawArr := v.MustIntSlice() + + replaced := v.ReplaceInt(func(index int, val int) int { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustIntSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInt(t *testing.T) { + + v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} + + collected := v.CollectInt(func(index int, val int) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInt8(t *testing.T) { + + val := int8(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int8()) + assert.Equal(t, val, New(m).Get("value").MustInt8()) + assert.Equal(t, int8(0), New(m).Get("nothing").Int8()) + assert.Equal(t, val, New(m).Get("nothing").Int8(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustInt8() + }) + +} + +func TestInt8Slice(t *testing.T) { + + val := int8(1) + m := map[string]interface{}{"value": []int8{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int8Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustInt8Slice()[0]) + assert.Equal(t, 
[]int8(nil), New(m).Get("nothing").Int8Slice()) + assert.Equal(t, val, New(m).Get("nothing").Int8Slice([]int8{int8(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustInt8Slice() + }) + +} + +func TestIsInt8(t *testing.T) { + + var v *Value + + v = &Value{data: int8(1)} + assert.True(t, v.IsInt8()) + + v = &Value{data: []int8{int8(1)}} + assert.True(t, v.IsInt8Slice()) + +} + +func TestEachInt8(t *testing.T) { + + v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1)}} + count := 0 + replacedVals := make([]int8, 0) + assert.Equal(t, v, v.EachInt8(func(i int, val int8) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustInt8Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustInt8Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustInt8Slice()[2]) + +} + +func TestWhereInt8(t *testing.T) { + + v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} + + selected := v.WhereInt8(func(i int, val int8) bool { + return i%2 == 0 + }).MustInt8Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInt8(t *testing.T) { + + v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} + + grouped := v.GroupInt8(func(i int, val int8) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]int8) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInt8(t *testing.T) { + + v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} + + rawArr := v.MustInt8Slice() + + replaced := v.ReplaceInt8(func(index int, val int8) int8 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustInt8Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInt8(t *testing.T) { + + v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} + + collected := v.CollectInt8(func(index int, val int8) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInt16(t *testing.T) { + + val := int16(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int16()) + assert.Equal(t, val, New(m).Get("value").MustInt16()) + assert.Equal(t, int16(0), New(m).Get("nothing").Int16()) + assert.Equal(t, val, New(m).Get("nothing").Int16(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustInt16() + }) + +} + +func TestInt16Slice(t *testing.T) { + + val := int16(1) + m := map[string]interface{}{"value": []int16{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int16Slice()[0]) + assert.Equal(t, 
val, New(m).Get("value").MustInt16Slice()[0]) + assert.Equal(t, []int16(nil), New(m).Get("nothing").Int16Slice()) + assert.Equal(t, val, New(m).Get("nothing").Int16Slice([]int16{int16(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustInt16Slice() + }) + +} + +func TestIsInt16(t *testing.T) { + + var v *Value + + v = &Value{data: int16(1)} + assert.True(t, v.IsInt16()) + + v = &Value{data: []int16{int16(1)}} + assert.True(t, v.IsInt16Slice()) + +} + +func TestEachInt16(t *testing.T) { + + v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1)}} + count := 0 + replacedVals := make([]int16, 0) + assert.Equal(t, v, v.EachInt16(func(i int, val int16) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustInt16Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustInt16Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustInt16Slice()[2]) + +} + +func TestWhereInt16(t *testing.T) { + + v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} + + selected := v.WhereInt16(func(i int, val int16) bool { + return i%2 == 0 + }).MustInt16Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInt16(t *testing.T) { + + v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} + + grouped := v.GroupInt16(func(i int, val int16) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]int16) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInt16(t *testing.T) { + + v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} + + rawArr := v.MustInt16Slice() + + replaced := v.ReplaceInt16(func(index int, val int16) int16 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustInt16Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInt16(t *testing.T) { + + v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} + + collected := v.CollectInt16(func(index int, val int16) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInt32(t *testing.T) { + + val := int32(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int32()) + assert.Equal(t, val, New(m).Get("value").MustInt32()) + assert.Equal(t, int32(0), New(m).Get("nothing").Int32()) + assert.Equal(t, val, New(m).Get("nothing").Int32(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustInt32() + }) + +} + +func TestInt32Slice(t *testing.T) { + + val := int32(1) + m := 
map[string]interface{}{"value": []int32{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int32Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustInt32Slice()[0]) + assert.Equal(t, []int32(nil), New(m).Get("nothing").Int32Slice()) + assert.Equal(t, val, New(m).Get("nothing").Int32Slice([]int32{int32(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustInt32Slice() + }) + +} + +func TestIsInt32(t *testing.T) { + + var v *Value + + v = &Value{data: int32(1)} + assert.True(t, v.IsInt32()) + + v = &Value{data: []int32{int32(1)}} + assert.True(t, v.IsInt32Slice()) + +} + +func TestEachInt32(t *testing.T) { + + v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1)}} + count := 0 + replacedVals := make([]int32, 0) + assert.Equal(t, v, v.EachInt32(func(i int, val int32) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustInt32Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustInt32Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustInt32Slice()[2]) + +} + +func TestWhereInt32(t *testing.T) { + + v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} + + selected := v.WhereInt32(func(i int, val int32) bool { + return i%2 == 0 + }).MustInt32Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInt32(t *testing.T) { + + v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} + + grouped := v.GroupInt32(func(i int, val int32) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]int32) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInt32(t *testing.T) { + + v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} + + rawArr := v.MustInt32Slice() + + replaced := v.ReplaceInt32(func(index int, val int32) int32 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustInt32Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInt32(t *testing.T) { + + v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} + + collected := v.CollectInt32(func(index int, val int32) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInt64(t *testing.T) { + + val := int64(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int64()) + assert.Equal(t, val, New(m).Get("value").MustInt64()) + assert.Equal(t, int64(0), New(m).Get("nothing").Int64()) + assert.Equal(t, val, New(m).Get("nothing").Int64(1)) + + 
assert.Panics(t, func() { + New(m).Get("age").MustInt64() + }) + +} + +func TestInt64Slice(t *testing.T) { + + val := int64(1) + m := map[string]interface{}{"value": []int64{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int64Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustInt64Slice()[0]) + assert.Equal(t, []int64(nil), New(m).Get("nothing").Int64Slice()) + assert.Equal(t, val, New(m).Get("nothing").Int64Slice([]int64{int64(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustInt64Slice() + }) + +} + +func TestIsInt64(t *testing.T) { + + var v *Value + + v = &Value{data: int64(1)} + assert.True(t, v.IsInt64()) + + v = &Value{data: []int64{int64(1)}} + assert.True(t, v.IsInt64Slice()) + +} + +func TestEachInt64(t *testing.T) { + + v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1)}} + count := 0 + replacedVals := make([]int64, 0) + assert.Equal(t, v, v.EachInt64(func(i int, val int64) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustInt64Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustInt64Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustInt64Slice()[2]) + +} + +func TestWhereInt64(t *testing.T) { + + v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} + + selected := v.WhereInt64(func(i int, val int64) bool { + return i%2 == 0 + }).MustInt64Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInt64(t *testing.T) { + + v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} + + grouped := v.GroupInt64(func(i int, val int64) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]int64) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInt64(t *testing.T) { + + v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} + + rawArr := v.MustInt64Slice() + + replaced := v.ReplaceInt64(func(index int, val int64) int64 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustInt64Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInt64(t *testing.T) { + + v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} + + collected := v.CollectInt64(func(index int, val int64) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestUint(t *testing.T) { + + val := uint(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint()) + assert.Equal(t, val, 
New(m).Get("value").MustUint())
+	assert.Equal(t, uint(0), New(m).Get("nothing").Uint())
+	assert.Equal(t, val, New(m).Get("nothing").Uint(1))
+
+	assert.Panics(t, func() {
+		New(m).Get("age").MustUint()
+	})
+
+}
+
+func TestUintSlice(t *testing.T) {
+
+	val := uint(1)
+	m := map[string]interface{}{"value": []uint{val}, "nothing": nil}
+	assert.Equal(t, val, New(m).Get("value").UintSlice()[0])
+	assert.Equal(t, val, New(m).Get("value").MustUintSlice()[0])
+	assert.Equal(t, []uint(nil), New(m).Get("nothing").UintSlice())
+	assert.Equal(t, val, New(m).Get("nothing").UintSlice([]uint{uint(1)})[0])
+
+	assert.Panics(t, func() {
+		New(m).Get("nothing").MustUintSlice()
+	})
+
+}
+
+func TestIsUint(t *testing.T) {
+
+	var v *Value
+
+	v = &Value{data: uint(1)}
+	assert.True(t, v.IsUint())
+
+	v = &Value{data: []uint{uint(1)}}
+	assert.True(t, v.IsUintSlice())
+
+}
+
+func TestEachUint(t *testing.T) {
+
+	v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1)}}
+	count := 0
+	replacedVals := make([]uint, 0)
+	assert.Equal(t, v, v.EachUint(func(i int, val uint) bool {
+
+		count++
+		replacedVals = append(replacedVals, val)
+
+		// abort early
+		if i == 2 {
+			return false
+		}
+
+		return true
+
+	}))
+
+	assert.Equal(t, count, 3)
+	assert.Equal(t, replacedVals[0], v.MustUintSlice()[0])
+	assert.Equal(t, replacedVals[1], v.MustUintSlice()[1])
+	assert.Equal(t, replacedVals[2], v.MustUintSlice()[2])
+
+}
+
+func TestWhereUint(t *testing.T) {
+
+	v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}}
+
+	selected := v.WhereUint(func(i int, val uint) bool {
+		return i%2 == 0
+	}).MustUintSlice()
+
+	assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupUint(t *testing.T) {
+
+	v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}}
+
+	grouped := v.GroupUint(func(i int, val uint) string {
+		return fmt.Sprintf("%v", i%2 == 0)
+	}).data.(map[string][]uint)
+
+	assert.Equal(t, 2, len(grouped))
+	assert.Equal(t, 3, len(grouped["true"]))
+	assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceUint(t *testing.T) {
+
+	v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}}
+
+	rawArr := v.MustUintSlice()
+
+	replaced := v.ReplaceUint(func(index int, val uint) uint {
+		if index < len(rawArr)-1 {
+			return rawArr[index+1]
+		}
+		return rawArr[0]
+	})
+
+	replacedArr := replaced.MustUintSlice()
+	if assert.Equal(t, 6, len(replacedArr)) {
+		assert.Equal(t, replacedArr[0], rawArr[1])
+		assert.Equal(t, replacedArr[1], rawArr[2])
+		assert.Equal(t, replacedArr[2], rawArr[3])
+		assert.Equal(t, replacedArr[3], rawArr[4])
+		assert.Equal(t, replacedArr[4], rawArr[5])
+		assert.Equal(t, replacedArr[5], rawArr[0])
+	}
+
+}
+
+func TestCollectUint(t *testing.T) {
+
+	v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}}
+
+	collected := v.CollectUint(func(index int, val uint) interface{} {
+		return index
+	})
+
+	collectedArr := collected.MustInterSlice()
+	if assert.Equal(t, 6, len(collectedArr)) {
+		assert.Equal(t, collectedArr[0], 0)
+		assert.Equal(t, collectedArr[1], 1)
+		assert.Equal(t, collectedArr[2], 2)
+		assert.Equal(t, collectedArr[3], 3)
+		assert.Equal(t, collectedArr[4], 4)
+		assert.Equal(t, collectedArr[5], 5)
+	}
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestUint8(t *testing.T) {
+
+	val := uint8(1)
+	m := map[string]interface{}{"value": val, "nothing": nil}
+	assert.Equal(t, val, New(m).Get("value").Uint8())
+	assert.Equal(t, val, New(m).Get("value").MustUint8())
+	assert.Equal(t, uint8(0), New(m).Get("nothing").Uint8())
+	assert.Equal(t, val, New(m).Get("nothing").Uint8(1))
+
+	assert.Panics(t, func() {
+		New(m).Get("age").MustUint8()
+	})
+
+}
+
+func TestUint8Slice(t *testing.T) {
+
+	val := uint8(1)
+	m := map[string]interface{}{"value": []uint8{val}, "nothing": nil}
+	assert.Equal(t, val, New(m).Get("value").Uint8Slice()[0])
+	assert.Equal(t, val, New(m).Get("value").MustUint8Slice()[0])
+	assert.Equal(t, []uint8(nil), New(m).Get("nothing").Uint8Slice())
+	assert.Equal(t, val, New(m).Get("nothing").Uint8Slice([]uint8{uint8(1)})[0])
+
+	assert.Panics(t, func() {
+		New(m).Get("nothing").MustUint8Slice()
+	})
+
+}
+
+func TestIsUint8(t *testing.T) {
+
+	var v *Value
+
+	v = &Value{data: uint8(1)}
+	assert.True(t, v.IsUint8())
+
+	v = &Value{data: []uint8{uint8(1)}}
+	assert.True(t, v.IsUint8Slice())
+
+}
+
+func TestEachUint8(t *testing.T) {
+
+	v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}}
+	count := 0
+	replacedVals := make([]uint8, 0)
+	assert.Equal(t, v, v.EachUint8(func(i int, val uint8) bool {
+
+		count++
+		replacedVals = append(replacedVals, val)
+
+		// abort early
+		if i == 2 {
+			return false
+		}
+
+		return true
+
+	}))
+
+	assert.Equal(t, count, 3)
+	assert.Equal(t, replacedVals[0], v.MustUint8Slice()[0])
+	assert.Equal(t, replacedVals[1], v.MustUint8Slice()[1])
+	assert.Equal(t, replacedVals[2], v.MustUint8Slice()[2])
+
+}
+
+func TestWhereUint8(t *testing.T) {
+
+	v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}}
+
+	selected := v.WhereUint8(func(i int, val uint8) bool {
+		return i%2 == 0
+	}).MustUint8Slice()
+
+	assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupUint8(t *testing.T) {
+
+	v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}}
+
+	grouped := v.GroupUint8(func(i int, val uint8) string {
+		return fmt.Sprintf("%v", i%2 == 0)
+	}).data.(map[string][]uint8)
+
+	assert.Equal(t, 2, len(grouped))
+	assert.Equal(t, 3, len(grouped["true"]))
+	assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceUint8(t *testing.T) {
+
+	v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}}
+
+	rawArr := v.MustUint8Slice()
+
+	replaced := v.ReplaceUint8(func(index int, val uint8) uint8 {
+		if index < len(rawArr)-1 {
+			return rawArr[index+1]
+		}
+		return rawArr[0]
+	})
+
+	replacedArr := replaced.MustUint8Slice()
+	if assert.Equal(t, 6, len(replacedArr)) {
+		assert.Equal(t, replacedArr[0], rawArr[1])
+		assert.Equal(t, replacedArr[1], rawArr[2])
+		assert.Equal(t, replacedArr[2], rawArr[3])
+		assert.Equal(t, replacedArr[3], rawArr[4])
+		assert.Equal(t, replacedArr[4], rawArr[5])
+		assert.Equal(t, replacedArr[5], rawArr[0])
+	}
+
+}
+
+func TestCollectUint8(t *testing.T) {
+
+	v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}}
+
+	collected := v.CollectUint8(func(index int, val uint8) interface{} {
+		return index
+	})
+
+	collectedArr := collected.MustInterSlice()
+	if assert.Equal(t, 6, len(collectedArr)) {
+		assert.Equal(t, collectedArr[0], 0)
+		assert.Equal(t, collectedArr[1], 1)
+		assert.Equal(t, collectedArr[2], 2)
+		assert.Equal(t, collectedArr[3], 3)
+		assert.Equal(t, collectedArr[4], 4)
+		assert.Equal(t, collectedArr[5], 5)
+	}
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestUint16(t *testing.T) {
+
+	val := uint16(1)
+	m := map[string]interface{}{"value": val, "nothing": nil}
+	assert.Equal(t, val, New(m).Get("value").Uint16())
+	assert.Equal(t, val, New(m).Get("value").MustUint16())
+	assert.Equal(t, uint16(0), New(m).Get("nothing").Uint16())
+	assert.Equal(t, val, New(m).Get("nothing").Uint16(1))
+
+	assert.Panics(t, func() {
+		New(m).Get("age").MustUint16()
+	})
+
+}
+
+func TestUint16Slice(t *testing.T) {
+
+	val := uint16(1)
+	m := map[string]interface{}{"value": []uint16{val}, "nothing": nil}
+	assert.Equal(t, val, New(m).Get("value").Uint16Slice()[0])
+	assert.Equal(t, val, New(m).Get("value").MustUint16Slice()[0])
+	assert.Equal(t, []uint16(nil), New(m).Get("nothing").Uint16Slice())
+	assert.Equal(t, val, New(m).Get("nothing").Uint16Slice([]uint16{uint16(1)})[0])
+
+	assert.Panics(t, func() {
+		New(m).Get("nothing").MustUint16Slice()
+	})
+
+}
+
+func TestIsUint16(t *testing.T) {
+
+	var v *Value
+
+	v = &Value{data: uint16(1)}
+	assert.True(t, v.IsUint16())
+
+	v = &Value{data: []uint16{uint16(1)}}
+	assert.True(t, v.IsUint16Slice())
+
+}
+
+func TestEachUint16(t *testing.T) {
+
+	v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}}
+	count := 0
+	replacedVals := make([]uint16, 0)
+	assert.Equal(t, v, v.EachUint16(func(i int, val uint16) bool {
+
+		count++
+		replacedVals = append(replacedVals, val)
+
+		// abort early
+		if i == 2 {
+			return false
+		}
+
+		return true
+
+	}))
+
+	assert.Equal(t, count, 3)
+	assert.Equal(t, replacedVals[0], v.MustUint16Slice()[0])
+	assert.Equal(t, replacedVals[1], v.MustUint16Slice()[1])
+	assert.Equal(t, replacedVals[2], v.MustUint16Slice()[2])
+
+}
+
+func TestWhereUint16(t *testing.T) {
+
+	v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}}
+
+	selected := v.WhereUint16(func(i int, val uint16) bool {
+		return i%2 == 0
+	}).MustUint16Slice()
+
+	assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupUint16(t *testing.T) {
+
+	v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}}
+
+	grouped := v.GroupUint16(func(i int, val uint16) string {
+		return fmt.Sprintf("%v", i%2 == 0)
+	}).data.(map[string][]uint16)
+
+	assert.Equal(t, 2, len(grouped))
+	assert.Equal(t, 3, len(grouped["true"]))
+	assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceUint16(t *testing.T) {
+
+	v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}}
+
+	rawArr := v.MustUint16Slice()
+
+	replaced := v.ReplaceUint16(func(index int, val uint16) uint16 {
+		if index < len(rawArr)-1 {
+			return rawArr[index+1]
+		}
+		return rawArr[0]
+	})
+
+	replacedArr := replaced.MustUint16Slice()
+	if assert.Equal(t, 6, len(replacedArr)) {
+		assert.Equal(t, replacedArr[0], rawArr[1])
+		assert.Equal(t, replacedArr[1], rawArr[2])
+		assert.Equal(t, replacedArr[2], rawArr[3])
+		assert.Equal(t, replacedArr[3], rawArr[4])
+		assert.Equal(t, replacedArr[4], rawArr[5])
+		assert.Equal(t, replacedArr[5], rawArr[0])
+	}
+
+}
+
+func TestCollectUint16(t *testing.T) {
+
+	v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}}
+
+	collected := v.CollectUint16(func(index int, val uint16) interface{} {
+		return index
+	})
+
+	collectedArr := collected.MustInterSlice()
+	if assert.Equal(t, 6, len(collectedArr)) {
+		assert.Equal(t, collectedArr[0], 0)
+		assert.Equal(t, collectedArr[1], 1)
+		assert.Equal(t, collectedArr[2], 2)
+		assert.Equal(t, collectedArr[3], 3)
+		assert.Equal(t, collectedArr[4], 4)
+		assert.Equal(t, collectedArr[5], 5)
+	}
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestUint32(t *testing.T) {
+
+	val := uint32(1)
+	m := map[string]interface{}{"value": val, "nothing": nil}
+	assert.Equal(t, val, New(m).Get("value").Uint32())
+	assert.Equal(t, val, New(m).Get("value").MustUint32())
+	assert.Equal(t, uint32(0), New(m).Get("nothing").Uint32())
+	assert.Equal(t, val, New(m).Get("nothing").Uint32(1))
+
+	assert.Panics(t, func() {
+		New(m).Get("age").MustUint32()
+	})
+
+}
+
+func TestUint32Slice(t *testing.T) {
+
+	val := uint32(1)
+	m := map[string]interface{}{"value": []uint32{val}, "nothing": nil}
+	assert.Equal(t, val, New(m).Get("value").Uint32Slice()[0])
+	assert.Equal(t, val, New(m).Get("value").MustUint32Slice()[0])
+	assert.Equal(t, []uint32(nil), New(m).Get("nothing").Uint32Slice())
+	assert.Equal(t, val, New(m).Get("nothing").Uint32Slice([]uint32{uint32(1)})[0])
+
+	assert.Panics(t, func() {
+		New(m).Get("nothing").MustUint32Slice()
+	})
+
+}
+
+func TestIsUint32(t *testing.T) {
+
+	var v *Value
+
+	v = &Value{data: uint32(1)}
+	assert.True(t, v.IsUint32())
+
+	v = &Value{data: []uint32{uint32(1)}}
+	assert.True(t, v.IsUint32Slice())
+
+}
+
+func TestEachUint32(t *testing.T) {
+
+	v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}}
+	count := 0
+	replacedVals := make([]uint32, 0)
+	assert.Equal(t, v, v.EachUint32(func(i int, val uint32) bool {
+
+		count++
+		replacedVals = append(replacedVals, val)
+
+		// abort early
+		if i == 2 {
+			return false
+		}
+
+		return true
+
+	}))
+
+	assert.Equal(t, count, 3)
+	assert.Equal(t, replacedVals[0], v.MustUint32Slice()[0])
+	assert.Equal(t, replacedVals[1], v.MustUint32Slice()[1])
+	assert.Equal(t, replacedVals[2], v.MustUint32Slice()[2])
+
+}
+
+func TestWhereUint32(t *testing.T) {
+
+	v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}}
+
+	selected := v.WhereUint32(func(i int, val uint32) bool {
+		return i%2 == 0
+	}).MustUint32Slice()
+
+	assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupUint32(t *testing.T) {
+
+	v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}}
+
+	grouped := v.GroupUint32(func(i int, val uint32) string {
+		return fmt.Sprintf("%v", i%2 == 0)
+	}).data.(map[string][]uint32)
+
+	assert.Equal(t, 2, len(grouped))
+	assert.Equal(t, 3, len(grouped["true"]))
+	assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceUint32(t *testing.T) {
+
+	v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}}
+
+	rawArr := v.MustUint32Slice()
+
+	replaced := v.ReplaceUint32(func(index int, val uint32) uint32 {
+		if index < len(rawArr)-1 {
+			return rawArr[index+1]
+		}
+		return rawArr[0]
+	})
+
+	replacedArr := replaced.MustUint32Slice()
+	if assert.Equal(t, 6, len(replacedArr)) {
+		assert.Equal(t, replacedArr[0], rawArr[1])
+		assert.Equal(t, replacedArr[1], rawArr[2])
+		assert.Equal(t, replacedArr[2], rawArr[3])
+		assert.Equal(t, replacedArr[3], rawArr[4])
+		assert.Equal(t, replacedArr[4], rawArr[5])
+		assert.Equal(t, replacedArr[5], rawArr[0])
+	}
+
+}
+
+func TestCollectUint32(t *testing.T) {
+
+	v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}}
+
+	collected := v.CollectUint32(func(index int, val uint32) interface{} {
+		return index
+	})
+
+	collectedArr := collected.MustInterSlice()
+	if assert.Equal(t, 6, len(collectedArr)) {
+		assert.Equal(t, collectedArr[0], 0)
+		assert.Equal(t, collectedArr[1], 1)
+		assert.Equal(t, collectedArr[2], 2)
+		assert.Equal(t, collectedArr[3], 3)
+		assert.Equal(t, collectedArr[4], 4)
+		assert.Equal(t, collectedArr[5], 5)
+	}
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestUint64(t *testing.T) {
+
+	val := uint64(1)
+	m := map[string]interface{}{"value": val, "nothing": nil}
+	assert.Equal(t, val, New(m).Get("value").Uint64())
+	assert.Equal(t, val, New(m).Get("value").MustUint64())
+	assert.Equal(t, uint64(0), New(m).Get("nothing").Uint64())
+	assert.Equal(t, val, New(m).Get("nothing").Uint64(1))
+
+	assert.Panics(t, func() {
+		New(m).Get("age").MustUint64()
+	})
+
+}
+
+func TestUint64Slice(t *testing.T) {
+
+	val := uint64(1)
+	m := map[string]interface{}{"value": []uint64{val}, "nothing": nil}
+	assert.Equal(t, val, New(m).Get("value").Uint64Slice()[0])
+	assert.Equal(t, val, New(m).Get("value").MustUint64Slice()[0])
+	assert.Equal(t, []uint64(nil), New(m).Get("nothing").Uint64Slice())
+	assert.Equal(t, val, New(m).Get("nothing").Uint64Slice([]uint64{uint64(1)})[0])
+
+	assert.Panics(t, func() {
+		New(m).Get("nothing").MustUint64Slice()
+	})
+
+}
+
+func TestIsUint64(t *testing.T) {
+
+	var v *Value
+
+	v = &Value{data: uint64(1)}
+	assert.True(t, v.IsUint64())
+
+	v = &Value{data: []uint64{uint64(1)}}
+	assert.True(t, v.IsUint64Slice())
+
+}
+
+func TestEachUint64(t *testing.T) {
+
+	v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}}
+	count := 0
+	replacedVals := make([]uint64, 0)
+	assert.Equal(t, v, v.EachUint64(func(i int, val uint64) bool {
+
+		count++
+		replacedVals = append(replacedVals, val)
+
+		// abort early
+		if i == 2 {
+			return false
+		}
+
+		return true
+
+	}))
+
+	assert.Equal(t, count, 3)
+	assert.Equal(t, replacedVals[0], v.MustUint64Slice()[0])
+	assert.Equal(t, replacedVals[1], v.MustUint64Slice()[1])
+	assert.Equal(t, replacedVals[2], v.MustUint64Slice()[2])
+
+}
+
+func TestWhereUint64(t *testing.T) {
+
+	v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}}
+
+	selected := v.WhereUint64(func(i int, val uint64) bool {
+		return i%2 == 0
+	}).MustUint64Slice()
+
+	assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupUint64(t *testing.T) {
+
+	v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}}
+
+	grouped := v.GroupUint64(func(i int, val uint64) string {
+		return fmt.Sprintf("%v", i%2 == 0)
+	}).data.(map[string][]uint64)
+
+	assert.Equal(t, 2, len(grouped))
+	assert.Equal(t, 3, len(grouped["true"]))
+	assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceUint64(t *testing.T) {
+
+	v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}}
+
+	rawArr := v.MustUint64Slice()
+
+	replaced := v.ReplaceUint64(func(index int, val uint64) uint64 {
+		if index < len(rawArr)-1 {
+			return rawArr[index+1]
+		}
+		return rawArr[0]
+	})
+
+	replacedArr := replaced.MustUint64Slice()
+	if assert.Equal(t, 6, len(replacedArr)) {
+		assert.Equal(t, replacedArr[0], rawArr[1])
+		assert.Equal(t, replacedArr[1], rawArr[2])
+		assert.Equal(t, replacedArr[2], rawArr[3])
+		assert.Equal(t, replacedArr[3], rawArr[4])
+		assert.Equal(t, replacedArr[4], rawArr[5])
+		assert.Equal(t, replacedArr[5], rawArr[0])
+	}
+
+}
+
+func TestCollectUint64(t *testing.T) {
+
+	v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}}
+
+	collected := v.CollectUint64(func(index int, val uint64) interface{} {
+		return index
+	})
+
+	collectedArr := collected.MustInterSlice()
+	if assert.Equal(t, 6, len(collectedArr)) {
+		assert.Equal(t, collectedArr[0], 0)
+		assert.Equal(t, collectedArr[1], 1)
+		assert.Equal(t, collectedArr[2], 2)
+		assert.Equal(t, collectedArr[3], 3)
+		assert.Equal(t, collectedArr[4], 4)
+		assert.Equal(t, collectedArr[5], 5)
+	}
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestUintptr(t *testing.T) {
+
+	val := uintptr(1)
+	m := map[string]interface{}{"value": val, "nothing": nil}
+	assert.Equal(t, val, New(m).Get("value").Uintptr())
+	assert.Equal(t, val, New(m).Get("value").MustUintptr())
+	assert.Equal(t, uintptr(0), New(m).Get("nothing").Uintptr())
+	assert.Equal(t, val, New(m).Get("nothing").Uintptr(1))
+
+	assert.Panics(t, func() {
+		New(m).Get("age").MustUintptr()
+	})
+
+}
+
+func TestUintptrSlice(t *testing.T) {
+
+	val := uintptr(1)
+	m := map[string]interface{}{"value": []uintptr{val}, "nothing": nil}
+	assert.Equal(t, val, New(m).Get("value").UintptrSlice()[0])
+	assert.Equal(t, val, New(m).Get("value").MustUintptrSlice()[0])
+	assert.Equal(t, []uintptr(nil), New(m).Get("nothing").UintptrSlice())
+	assert.Equal(t, val, New(m).Get("nothing").UintptrSlice([]uintptr{uintptr(1)})[0])
+
+	assert.Panics(t, func() {
+		New(m).Get("nothing").MustUintptrSlice()
+	})
+
+}
+
+func TestIsUintptr(t *testing.T) {
+
+	var v *Value
+
+	v = &Value{data: uintptr(1)}
+	assert.True(t, v.IsUintptr())
+
+	v = &Value{data: []uintptr{uintptr(1)}}
+	assert.True(t, v.IsUintptrSlice())
+
+}
+
+func TestEachUintptr(t *testing.T) {
+
+	v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}}
+	count := 0
+	replacedVals := make([]uintptr, 0)
+	assert.Equal(t, v, v.EachUintptr(func(i int, val uintptr) bool {
+
+		count++
+		replacedVals = append(replacedVals, val)
+
+		// abort early
+		if i == 2 {
+			return false
+		}
+
+		return true
+
+	}))
+
+	assert.Equal(t, count, 3)
+	assert.Equal(t, replacedVals[0], v.MustUintptrSlice()[0])
+	assert.Equal(t, replacedVals[1], v.MustUintptrSlice()[1])
+	assert.Equal(t, replacedVals[2], v.MustUintptrSlice()[2])
+
+}
+
+func TestWhereUintptr(t *testing.T) {
+
+	v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}}
+
+	selected := v.WhereUintptr(func(i int, val uintptr) bool {
+		return i%2 == 0
+	}).MustUintptrSlice()
+
+	assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupUintptr(t *testing.T) {
+
+	v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}}
+
+	grouped := v.GroupUintptr(func(i int, val uintptr) string {
+		return fmt.Sprintf("%v", i%2 == 0)
+	}).data.(map[string][]uintptr)
+
+	assert.Equal(t, 2, len(grouped))
+	assert.Equal(t, 3, len(grouped["true"]))
+	assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceUintptr(t *testing.T) {
+
+	v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}}
+
+	rawArr := v.MustUintptrSlice()
+
+	replaced := v.ReplaceUintptr(func(index int, val uintptr) uintptr {
+		if index < len(rawArr)-1 {
+			return rawArr[index+1]
+		}
+		return rawArr[0]
+	})
+
+	replacedArr := replaced.MustUintptrSlice()
+	if assert.Equal(t, 6, len(replacedArr)) {
+		assert.Equal(t, replacedArr[0], rawArr[1])
+		assert.Equal(t, replacedArr[1], rawArr[2])
+		assert.Equal(t, replacedArr[2], rawArr[3])
+		assert.Equal(t, replacedArr[3], rawArr[4])
+		assert.Equal(t, replacedArr[4], rawArr[5])
+		assert.Equal(t, replacedArr[5], rawArr[0])
+	}
+
+}
+
+func TestCollectUintptr(t *testing.T) {
+
+	v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}}
+
+	collected := v.CollectUintptr(func(index int, val uintptr) interface{} {
+		return index
+	})
+
+	collectedArr := collected.MustInterSlice()
+	if assert.Equal(t, 6, len(collectedArr)) {
+		assert.Equal(t, collectedArr[0], 0)
+		assert.Equal(t, collectedArr[1], 1)
+		assert.Equal(t, collectedArr[2], 2)
+		assert.Equal(t, collectedArr[3], 3)
+		assert.Equal(t, collectedArr[4], 4)
+		assert.Equal(t, collectedArr[5], 5)
+	}
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestFloat32(t *testing.T) {
+
+	val := float32(1)
+	m := map[string]interface{}{"value": val, "nothing": nil}
+	assert.Equal(t, val, New(m).Get("value").Float32())
+	assert.Equal(t, val, New(m).Get("value").MustFloat32())
+	assert.Equal(t, float32(0), New(m).Get("nothing").Float32())
+	assert.Equal(t, val, New(m).Get("nothing").Float32(1))
+
+	assert.Panics(t, func() {
+		New(m).Get("age").MustFloat32()
+	})
+
+}
+
+func TestFloat32Slice(t *testing.T) {
+
+	val := float32(1)
+	m := map[string]interface{}{"value": []float32{val}, "nothing": nil}
+	assert.Equal(t, val, New(m).Get("value").Float32Slice()[0])
+	assert.Equal(t, val, New(m).Get("value").MustFloat32Slice()[0])
+	assert.Equal(t, []float32(nil), New(m).Get("nothing").Float32Slice())
+	assert.Equal(t, val, New(m).Get("nothing").Float32Slice([]float32{float32(1)})[0])
+
+	assert.Panics(t, func() {
+		New(m).Get("nothing").MustFloat32Slice()
+	})
+
+}
+
+func TestIsFloat32(t *testing.T) {
+
+	var v *Value
+
+	v = &Value{data: float32(1)}
+	assert.True(t, v.IsFloat32())
+
+	v = &Value{data: []float32{float32(1)}}
+	assert.True(t, v.IsFloat32Slice())
+
+}
+
+func TestEachFloat32(t *testing.T) {
+
+	v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1)}}
+	count := 0
+	replacedVals := make([]float32, 0)
+	assert.Equal(t, v, v.EachFloat32(func(i int, val float32) bool {
+
+		count++
+		replacedVals = append(replacedVals, val)
+
+		// abort early
+		if i == 2 {
+			return false
+		}
+
+		return true
+
+	}))
+
+	assert.Equal(t, count, 3)
+	assert.Equal(t, replacedVals[0], v.MustFloat32Slice()[0])
+	assert.Equal(t, replacedVals[1], v.MustFloat32Slice()[1])
+	assert.Equal(t, replacedVals[2], v.MustFloat32Slice()[2])
+
+}
+
+func TestWhereFloat32(t *testing.T) {
+
+	v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}}
+
+	selected := v.WhereFloat32(func(i int, val float32) bool {
+		return i%2 == 0
+	}).MustFloat32Slice()
+
+	assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupFloat32(t *testing.T) {
+
+	v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}}
+
+	grouped := v.GroupFloat32(func(i int, val float32) string {
+		return fmt.Sprintf("%v", i%2 == 0)
+	}).data.(map[string][]float32)
+
+	assert.Equal(t, 2, len(grouped))
+	assert.Equal(t, 3, len(grouped["true"]))
+	assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceFloat32(t *testing.T) {
+
+	v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}}
+
+	rawArr := v.MustFloat32Slice()
+
+	replaced := v.ReplaceFloat32(func(index int, val float32) float32 {
+		if index < len(rawArr)-1 {
+			return rawArr[index+1]
+		}
+		return rawArr[0]
+	})
+
+	replacedArr := replaced.MustFloat32Slice()
+	if assert.Equal(t, 6, len(replacedArr)) {
+		assert.Equal(t, replacedArr[0], rawArr[1])
+		assert.Equal(t, replacedArr[1], rawArr[2])
+		assert.Equal(t, replacedArr[2], rawArr[3])
+		assert.Equal(t, replacedArr[3], rawArr[4])
+		assert.Equal(t, replacedArr[4], rawArr[5])
+		assert.Equal(t, replacedArr[5], rawArr[0])
+	}
+
+}
+
+func TestCollectFloat32(t *testing.T) {
+
+	v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}}
+
+	collected := v.CollectFloat32(func(index int, val float32) interface{} {
+		return index
+	})
+
+	collectedArr := collected.MustInterSlice()
+	if assert.Equal(t, 6, len(collectedArr)) {
+		assert.Equal(t, collectedArr[0], 0)
+		assert.Equal(t, collectedArr[1], 1)
+		assert.Equal(t, collectedArr[2], 2)
+		assert.Equal(t, collectedArr[3], 3)
+		assert.Equal(t, collectedArr[4], 4)
+		assert.Equal(t, collectedArr[5], 5)
+	}
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestFloat64(t *testing.T) {
+
+	val := float64(1)
+	m := map[string]interface{}{"value": val, "nothing": nil}
+	assert.Equal(t, val, New(m).Get("value").Float64())
+	assert.Equal(t, val, New(m).Get("value").MustFloat64())
+	assert.Equal(t, float64(0), New(m).Get("nothing").Float64())
+	assert.Equal(t, val, New(m).Get("nothing").Float64(1))
+
+	assert.Panics(t, func() {
+		New(m).Get("age").MustFloat64()
+	})
+
+}
+
+func TestFloat64Slice(t *testing.T) {
+
+	val := float64(1)
+	m := map[string]interface{}{"value": []float64{val}, "nothing": nil}
+	assert.Equal(t, val, New(m).Get("value").Float64Slice()[0])
+	assert.Equal(t, val, New(m).Get("value").MustFloat64Slice()[0])
+	assert.Equal(t, []float64(nil), New(m).Get("nothing").Float64Slice())
+	assert.Equal(t, val, New(m).Get("nothing").Float64Slice([]float64{float64(1)})[0])
+
+	assert.Panics(t, func() {
+		New(m).Get("nothing").MustFloat64Slice()
+	})
+
+}
+
+func TestIsFloat64(t *testing.T) {
+
+	var v *Value
+
+	v = &Value{data: float64(1)}
+	assert.True(t, v.IsFloat64())
+
+	v = &Value{data: []float64{float64(1)}}
+	assert.True(t, v.IsFloat64Slice())
+
+}
+
+func TestEachFloat64(t *testing.T) {
+
+	v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1)}}
+	count := 0
+	replacedVals := make([]float64, 0)
+	assert.Equal(t, v, v.EachFloat64(func(i int, val float64) bool {
+
+		count++
+		replacedVals = append(replacedVals, val)
+
+		// abort early
+		if i == 2 {
+			return false
+		}
+
+		return true
+
+	}))
+
+	assert.Equal(t, count, 3)
+	assert.Equal(t, replacedVals[0], v.MustFloat64Slice()[0])
+	assert.Equal(t, replacedVals[1], v.MustFloat64Slice()[1])
+	assert.Equal(t, replacedVals[2], v.MustFloat64Slice()[2])
+
+}
+
+func TestWhereFloat64(t *testing.T) {
+
+	v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}}
+
+	selected := v.WhereFloat64(func(i int, val float64) bool {
+		return i%2 == 0
+	}).MustFloat64Slice()
+
+	assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupFloat64(t *testing.T) {
+
+	v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}}
+
+	grouped := v.GroupFloat64(func(i int, val float64) string {
+		return fmt.Sprintf("%v", i%2 == 0)
+	}).data.(map[string][]float64)
+
+	assert.Equal(t, 2, len(grouped))
+	assert.Equal(t, 3, len(grouped["true"]))
+	assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceFloat64(t *testing.T) {
+
+	v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}}
+
+	rawArr := v.MustFloat64Slice()
+
+	replaced := v.ReplaceFloat64(func(index int, val float64) float64 {
+		if index < len(rawArr)-1 {
+			return rawArr[index+1]
+		}
+		return rawArr[0]
+	})
+
+	replacedArr := replaced.MustFloat64Slice()
+	if assert.Equal(t, 6, len(replacedArr)) {
+		assert.Equal(t, replacedArr[0], rawArr[1])
+		assert.Equal(t, replacedArr[1], rawArr[2])
+		assert.Equal(t, replacedArr[2], rawArr[3])
+		assert.Equal(t, replacedArr[3], rawArr[4])
+		assert.Equal(t, replacedArr[4], rawArr[5])
+		assert.Equal(t, replacedArr[5], rawArr[0])
+	}
+
+}
+
+func TestCollectFloat64(t *testing.T) {
+
+	v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}}
+
+	collected := v.CollectFloat64(func(index int, val float64) interface{} {
+		return index
+	})
+
+	collectedArr := collected.MustInterSlice()
+	if assert.Equal(t, 6, len(collectedArr)) {
+		assert.Equal(t, collectedArr[0], 0)
+		assert.Equal(t, collectedArr[1], 1)
+		assert.Equal(t, collectedArr[2], 2)
+		assert.Equal(t, collectedArr[3], 3)
+		assert.Equal(t, collectedArr[4], 4)
+		assert.Equal(t, collectedArr[5], 5)
+	}
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestComplex64(t *testing.T) {
+
+	val := complex64(1)
+	m := map[string]interface{}{"value": val, "nothing": nil}
+	assert.Equal(t, val, New(m).Get("value").Complex64())
+	assert.Equal(t, val, New(m).Get("value").MustComplex64())
+	assert.Equal(t, complex64(0), New(m).Get("nothing").Complex64())
+	assert.Equal(t, val, New(m).Get("nothing").Complex64(1))
+
+	assert.Panics(t, func() {
+		New(m).Get("age").MustComplex64()
+	})
+
+}
+
+func TestComplex64Slice(t *testing.T) {
+
+	val := complex64(1)
+	m := map[string]interface{}{"value": []complex64{val}, "nothing": nil}
+	assert.Equal(t, val, New(m).Get("value").Complex64Slice()[0])
+	assert.Equal(t, val, New(m).Get("value").MustComplex64Slice()[0])
+	assert.Equal(t, []complex64(nil), New(m).Get("nothing").Complex64Slice())
+	assert.Equal(t, val, New(m).Get("nothing").Complex64Slice([]complex64{complex64(1)})[0])
+
+	assert.Panics(t, func() {
+		New(m).Get("nothing").MustComplex64Slice()
+	})
+
+}
+
+func TestIsComplex64(t *testing.T) {
+
+	var v *Value
+
+	v = &Value{data: complex64(1)}
+	assert.True(t, v.IsComplex64())
+
+	v = &Value{data: []complex64{complex64(1)}}
+	assert.True(t, v.IsComplex64Slice())
+
+}
+
+func TestEachComplex64(t *testing.T) {
+
+	v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}}
+	count := 0
+	replacedVals := make([]complex64, 0)
+	assert.Equal(t, v, v.EachComplex64(func(i int, val complex64) bool {
+
+		count++
+		replacedVals = append(replacedVals, val)
+
+		// abort early
+		if i == 2 {
+			return false
+		}
+
+		return true
+
+	}))
+
+	assert.Equal(t, count, 3)
+	assert.Equal(t, replacedVals[0], v.MustComplex64Slice()[0])
+	assert.Equal(t, replacedVals[1], v.MustComplex64Slice()[1])
+	assert.Equal(t, replacedVals[2], v.MustComplex64Slice()[2])
+
+}
+
+func TestWhereComplex64(t *testing.T) {
+
+	v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}}
+
+	selected := v.WhereComplex64(func(i int, val complex64) bool {
+		return i%2 == 0
+	}).MustComplex64Slice()
+
+	assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupComplex64(t *testing.T) {
+
+	v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}}
+
+	grouped := v.GroupComplex64(func(i int, val complex64) string {
+		return fmt.Sprintf("%v", i%2 == 0)
+	}).data.(map[string][]complex64)
+
+	assert.Equal(t, 2, len(grouped))
+	assert.Equal(t, 3, len(grouped["true"]))
+	assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceComplex64(t *testing.T) {
+
+	v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}}
+
+	rawArr := v.MustComplex64Slice()
+
+	replaced := v.ReplaceComplex64(func(index int, val complex64) complex64 {
+		if index < len(rawArr)-1 {
+			return rawArr[index+1]
+		}
+		return rawArr[0]
+	})
+
+	replacedArr := replaced.MustComplex64Slice()
+	if assert.Equal(t, 6, len(replacedArr)) {
+		assert.Equal(t, replacedArr[0], rawArr[1])
+		assert.Equal(t, replacedArr[1], rawArr[2])
+		assert.Equal(t, replacedArr[2], rawArr[3])
+		assert.Equal(t, replacedArr[3], rawArr[4])
+		assert.Equal(t, replacedArr[4], rawArr[5])
+		assert.Equal(t, replacedArr[5], rawArr[0])
+	}
+
+}
+
+func TestCollectComplex64(t *testing.T) {
+
+	v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}}
+
+	collected := v.CollectComplex64(func(index int, val complex64) interface{} {
+		return index
+	})
+
+	collectedArr := collected.MustInterSlice()
+	if assert.Equal(t, 6, len(collectedArr)) {
+		assert.Equal(t, collectedArr[0], 0)
+		assert.Equal(t, collectedArr[1], 1)
+		assert.Equal(t, collectedArr[2], 2)
+		assert.Equal(t, collectedArr[3], 3)
+		assert.Equal(t, collectedArr[4], 4)
+		assert.Equal(t, collectedArr[5], 5)
+	}
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestComplex128(t *testing.T) {
+
+	val := complex128(1)
+	m := map[string]interface{}{"value": val, "nothing": nil}
+	assert.Equal(t, val, New(m).Get("value").Complex128())
+	assert.Equal(t, val, New(m).Get("value").MustComplex128())
+	assert.Equal(t, complex128(0), New(m).Get("nothing").Complex128())
+	assert.Equal(t, val, New(m).Get("nothing").Complex128(1))
+
+	assert.Panics(t, func() {
+		New(m).Get("age").MustComplex128()
+	})
+
+}
+
+func TestComplex128Slice(t *testing.T) {
+
+	val := complex128(1)
+	m := map[string]interface{}{"value": []complex128{val}, "nothing": nil}
+	assert.Equal(t, val, New(m).Get("value").Complex128Slice()[0])
+	assert.Equal(t, val, New(m).Get("value").MustComplex128Slice()[0])
+	assert.Equal(t, []complex128(nil), New(m).Get("nothing").Complex128Slice())
+	assert.Equal(t, val, New(m).Get("nothing").Complex128Slice([]complex128{complex128(1)})[0])
+
+	assert.Panics(t, func() {
+		New(m).Get("nothing").MustComplex128Slice()
+	})
+
+}
+
+func TestIsComplex128(t *testing.T) {
+
+	var v *Value
+
+	v = &Value{data: complex128(1)}
+	assert.True(t, v.IsComplex128())
+
+	v = &Value{data: []complex128{complex128(1)}}
+	assert.True(t, v.IsComplex128Slice())
+
+}
+
+func TestEachComplex128(t *testing.T) {
+
+	v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}}
+	count := 0
+	replacedVals := make([]complex128, 0)
+	assert.Equal(t, v, v.EachComplex128(func(i int, val complex128) bool {
+
+		count++
+		replacedVals = append(replacedVals, val)
+
+		// abort early
+		if i == 2 {
+			return false
+		}
+
+		return true
+
+	}))
+
+	assert.Equal(t, count, 3)
+	assert.Equal(t, replacedVals[0], v.MustComplex128Slice()[0])
+	assert.Equal(t, replacedVals[1], v.MustComplex128Slice()[1])
+	assert.Equal(t, replacedVals[2], v.MustComplex128Slice()[2])
+
+}
+
+func TestWhereComplex128(t *testing.T) {
+
+	v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}}
+
+	selected := v.WhereComplex128(func(i int, val complex128) bool {
+		return i%2 == 0
+	}).MustComplex128Slice()
+
+	assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupComplex128(t *testing.T) {
+
+	v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}}
+
+	grouped := v.GroupComplex128(func(i int, val complex128) string {
+		return fmt.Sprintf("%v", i%2 == 0)
+	}).data.(map[string][]complex128)
+
+	assert.Equal(t, 2, len(grouped))
+	assert.Equal(t, 3, len(grouped["true"]))
+	assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceComplex128(t *testing.T) {
+
+	v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}}
+
+	rawArr := v.MustComplex128Slice()
+
+	replaced := v.ReplaceComplex128(func(index int, val complex128) complex128 {
+		if index < len(rawArr)-1 {
+			return rawArr[index+1]
+		}
+		return rawArr[0]
+	})
+
+	replacedArr := replaced.MustComplex128Slice()
+	if assert.Equal(t, 6, len(replacedArr)) {
+		assert.Equal(t, replacedArr[0], rawArr[1])
+		assert.Equal(t, replacedArr[1], rawArr[2])
+		assert.Equal(t, replacedArr[2], rawArr[3])
+		assert.Equal(t, replacedArr[3], rawArr[4])
+		assert.Equal(t, replacedArr[4], rawArr[5])
+		assert.Equal(t, replacedArr[5], rawArr[0])
+	}
+
+}
+
+func TestCollectComplex128(t *testing.T) {
+
+	v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}}
+
+	collected := v.CollectComplex128(func(index int, val complex128) interface{} {
+		return index
+	})
+
+	collectedArr := collected.MustInterSlice()
+	if assert.Equal(t, 6, len(collectedArr)) {
+		assert.Equal(t, collectedArr[0], 0)
+		assert.Equal(t, collectedArr[1], 1)
+		assert.Equal(t, collectedArr[2], 2)
+		assert.Equal(t, collectedArr[3], 3)
+		assert.Equal(t, collectedArr[4], 4)
+		assert.Equal(t, collectedArr[5], 5)
+	}
+
+}
diff --git a/vendor/src/github.com/stretchr/objx/value.go b/vendor/src/github.com/stretchr/objx/value.go
new file mode 100644
index 00000000..7aaef06b
--- /dev/null
+++ b/vendor/src/github.com/stretchr/objx/value.go
@@ -0,0 +1,13 @@
+package objx
+
+// Value provides methods for extracting interface{} data in various
+// types.
+type Value struct {
+	// data contains the raw data being managed by this Value
+	data interface{}
+}
+
+// Data returns the raw data contained by this Value
+func (v *Value) Data() interface{} {
+	return v.data
+}
diff --git a/vendor/src/github.com/stretchr/objx/value_test.go b/vendor/src/github.com/stretchr/objx/value_test.go
new file mode 100644
index 00000000..0bc65d92
--- /dev/null
+++ b/vendor/src/github.com/stretchr/objx/value_test.go
@@ -0,0 +1 @@
+package objx